From 99e9e9ea0e89d409c73739b4f43d395a4e606449 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 7 May 2024 06:59:15 +0000
Subject: [PATCH 1/2] Bump github.com/shipwright-io/build from 0.12.0 to 0.13.0

Bumps [github.com/shipwright-io/build](https://github.com/shipwright-io/build) from 0.12.0 to 0.13.0.
- [Release notes](https://github.com/shipwright-io/build/releases)
- [Commits](https://github.com/shipwright-io/build/compare/v0.12.0...v0.13.0)

---
updated-dependencies:
- dependency-name: github.com/shipwright-io/build
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 57 +-
 go.sum | 121 +-
 .../cloudflare/circl/ecc/goldilocks/twist.go | 2 +-
 .../cloudflare/circl/internal/sha3/keccakf.go | 12 +-
 .../cloudflare/circl/internal/sha3/sha3.go | 11 +-
 .../cloudflare/circl/internal/sha3/shake.go | 40 +
 .../cloudflare/circl/math/primes.go | 34 +
 .../cloudflare/circl/sign/ed25519/ed25519.go | 2 +-
 .../golang_protobuf_extensions/LICENSE | 201 -
 .../golang_protobuf_extensions/NOTICE | 1 -
 .../pbutil/.gitignore | 1 -
 .../pbutil/Makefile | 7 -
 .../pbutil/decode.go | 75 -
 .../pbutil/encode.go | 46 -
 vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 16 +
 vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 4 +-
 .../ginkgo/v2/reporters/default_reporter.go | 47 +-
 .../github.com/onsi/ginkgo/v2/types/config.go | 5 +-
 .../github.com/onsi/ginkgo/v2/types/flags.go | 15 +-
 .../onsi/ginkgo/v2/types/version.go | 2 +-
 vendor/github.com/onsi/gomega/CHANGELOG.md | 13 +
 vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +-
 .../client_golang/prometheus/histogram.go | 56 +-
 .../client_golang/prometheus/labels.go | 2 +
 .../prometheus/process_collector_other.go | 4 +-
 .../prometheus/process_collector_wasip1.go} | 20 +-
 .../prometheus/client_model/go/metrics.pb.go | 195 +-
 .../prometheus/common/expfmt/decode.go | 27 +-
 .../prometheus/common/expfmt/encode.go | 78 +-
 .../prometheus/common/expfmt/expfmt.go | 144 +-
 .../common/expfmt/openmetrics_create.go | 85 +-
 .../prometheus/common/expfmt/text_create.go | 118 +-
 .../prometheus/common/expfmt/text_parse.go | 8 +-
 .../prometheus/common/model/alert.go | 4 +-
 .../prometheus/common/model/labels.go | 22 +-
 .../prometheus/common/model/metadata.go | 28 +
 .../prometheus/common/model/metric.go | 368 +-
 .../prometheus/common/model/signature.go | 6 +-
 .../prometheus/common/model/silence.go | 2 +-
 .../prometheus/common/model/value.go | 16 +-
 .../prometheus/common/model/value_float.go | 14 +-
 .../prometheus/procfs/Makefile.common | 2 +-
 .../prometheus/procfs/fs_statfs_notype.go | 4 +-
 .../prometheus/procfs/fs_statfs_type.go | 4 +-
 .../prometheus/procfs/mountstats.go | 83 +-
 .../prometheus/procfs/proc_fdinfo.go | 8 +-
 .../github.com/prometheus/procfs/proc_maps.go | 20 +-
 .../prometheus/procfs/proc_status.go | 21 +-
 .../apis/build/v1alpha1/build_conversion.go | 24 -
 .../pkg/apis/build/v1alpha1/build_types.go | 22 +-
 .../build/v1alpha1/buildrun_conversion.go | 24 -
 .../pkg/apis/build/v1alpha1/buildrun_types.go | 9 +-
 .../v1alpha1/buildstrategy_conversion.go | 24 -
 .../build/v1alpha1/buildstrategy_types.go | 1 -
 .../clusterbuildstrategy_conversion.go | 24 -
 .../v1alpha1/clusterbuildstrategy_types.go | 1 -
 .../build/v1alpha1/zz_generated.deepcopy.go | 9 +
 .../apis/build/v1beta1/build_conversion.go | 93 +-
 .../pkg/apis/build/v1beta1/build_types.go | 53 +-
 .../apis/build/v1beta1/buildrun_conversion.go | 89 +-
 .../pkg/apis/build/v1beta1/buildrun_types.go | 13 +-
.../build/v1beta1/buildstrategy_conversion.go | 4 +- .../apis/build/v1beta1/buildstrategy_types.go | 1 + .../clusterbuildstrategy_conversion.go | 4 +- .../v1beta1/clusterbuildstrategy_types.go | 1 + .../build/pkg/apis/build/v1beta1/source.go | 36 +- .../build/v1beta1/zz_generated.deepcopy.go | 36 +- .../pipeline/pkg/apis/config/contexts.go | 48 - .../pipeline/pkg/apis/config/default.go | 5 +- .../pipeline/pkg/apis/config/events.go | 185 + .../pipeline/pkg/apis/config/feature_flags.go | 101 +- .../pipeline/pkg/apis/config/metrics.go | 3 + .../pipeline/pkg/apis/config/spire_config.go | 3 + .../pipeline/pkg/apis/config/store.go | 29 +- .../pkg/apis/config/zz_generated.deepcopy.go | 23 + .../pipeline/{storagetypes.go => constant.go} | 9 +- .../pkg/apis/pipeline/v1/matrix_types.go | 16 +- .../pkg/apis/pipeline/v1/openapi_generated.go | 4 +- .../pkg/apis/pipeline/v1/param_types.go | 114 +- .../pkg/apis/pipeline/v1/pipeline_types.go | 1 - .../apis/pipeline/v1/pipeline_validation.go | 212 +- .../apis/pipeline/v1/pipelinerun_defaults.go | 15 +- .../pkg/apis/pipeline/v1/pipelinerun_types.go | 30 + .../pipeline/v1/pipelinerun_validation.go | 82 +- .../pkg/apis/pipeline/v1/result_validation.go | 14 +- .../pkg/apis/pipeline/v1/resultref.go | 11 +- .../pkg/apis/pipeline/v1/swagger.json | 4 +- .../pkg/apis/pipeline/v1/task_validation.go | 248 +- .../pkg/apis/pipeline/v1/taskref_types.go | 3 + .../apis/pipeline/v1/taskref_validation.go | 14 +- .../pkg/apis/pipeline/v1/taskrun_defaults.go | 8 + .../pkg/apis/pipeline/v1/taskrun_types.go | 8 +- .../apis/pipeline/v1/taskrun_validation.go | 10 +- .../pkg/apis/pipeline/v1/when_types.go | 4 +- .../apis/pipeline/v1/workspace_validation.go | 12 - .../v1beta1/cluster_task_validation.go | 7 +- .../pipeline/v1beta1/container_conversion.go | 4 - .../apis/pipeline/v1beta1/customrun_types.go | 7 +- .../pkg/apis/pipeline/v1beta1/matrix_types.go | 16 +- .../pipeline/v1beta1/openapi_generated.go | 28 +- .../apis/pipeline/v1beta1/param_conversion.go | 3 +- .../pkg/apis/pipeline/v1beta1/param_types.go | 98 +- .../pipeline/v1beta1/pipeline_conversion.go | 42 +- .../apis/pipeline/v1beta1/pipeline_types.go | 5 +- .../pipeline/v1beta1/pipeline_validation.go | 149 +- .../v1beta1/pipelinerun_conversion.go | 30 +- .../pipeline/v1beta1/pipelinerun_defaults.go | 15 +- .../pipeline/v1beta1/pipelinerun_types.go | 3 +- .../v1beta1/pipelinerun_validation.go | 9 +- .../pipeline/v1beta1/resolver_conversion.go | 2 +- .../pipeline/v1beta1/result_conversion.go | 20 +- .../pipeline/v1beta1/result_validation.go | 6 - .../pkg/apis/pipeline/v1beta1/swagger.json | 22 +- .../apis/pipeline/v1beta1/task_conversion.go | 182 +- .../pkg/apis/pipeline/v1beta1/task_types.go | 41 +- .../apis/pipeline/v1beta1/task_validation.go | 216 +- .../pipeline/v1beta1/taskref_conversion.go | 3 +- .../pipeline/v1beta1/taskref_validation.go | 24 +- .../pipeline/v1beta1/taskrun_conversion.go | 34 +- .../apis/pipeline/v1beta1/taskrun_defaults.go | 8 + .../apis/pipeline/v1beta1/taskrun_types.go | 11 +- .../pipeline/v1beta1/taskrun_validation.go | 10 +- .../pkg/apis/pipeline/v1beta1/when_types.go | 4 +- .../pipeline/v1beta1/workspace_conversion.go | 3 +- .../pipeline/pkg/apis/version/conversion.go | 2 +- .../pipeline/pkg/substitution/substitution.go | 141 +- vendor/go.uber.org/zap/.golangci.yml | 2 +- vendor/go.uber.org/zap/.readme.tmpl | 10 +- vendor/go.uber.org/zap/CHANGELOG.md | 54 +- .../go.uber.org/zap/{LICENSE.txt => LICENSE} | 0 vendor/go.uber.org/zap/README.md | 66 +- vendor/go.uber.org/zap/buffer/buffer.go | 2 +- 
vendor/go.uber.org/zap/field.go | 2 + vendor/go.uber.org/zap/logger.go | 39 +- vendor/go.uber.org/zap/options.go | 15 + vendor/go.uber.org/zap/sugar.go | 39 + .../zap/zapcore/console_encoder.go | 2 +- vendor/go.uber.org/zap/zapcore/encoder.go | 15 + vendor/go.uber.org/zap/zapcore/field.go | 2 +- .../go.uber.org/zap/zapcore/json_encoder.go | 2 +- vendor/golang.org/x/net/html/token.go | 12 +- vendor/golang.org/x/net/http2/frame.go | 42 +- vendor/golang.org/x/net/http2/pipe.go | 11 +- vendor/golang.org/x/net/http2/server.go | 13 +- vendor/golang.org/x/net/http2/testsync.go | 331 + vendor/golang.org/x/net/http2/transport.go | 307 +- vendor/golang.org/x/oauth2/deviceauth.go | 198 + vendor/golang.org/x/oauth2/internal/token.go | 70 +- vendor/golang.org/x/oauth2/oauth2.go | 33 +- vendor/golang.org/x/oauth2/pkce.go | 68 + vendor/golang.org/x/oauth2/token.go | 2 +- vendor/golang.org/x/sys/unix/aliases.go | 2 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 2 +- vendor/golang.org/x/sys/unix/mmap_nomremap.go | 2 +- .../x/sys/unix/syscall_darwin_libSystem.go | 2 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 12 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 99 + .../x/sys/unix/syscall_zos_s390x.go | 8 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 36 +- .../x/sys/unix/zerrors_linux_386.go | 3 + .../x/sys/unix/zerrors_linux_amd64.go | 3 + .../x/sys/unix/zerrors_linux_arm.go | 3 + .../x/sys/unix/zerrors_linux_arm64.go | 3 + .../x/sys/unix/zerrors_linux_loong64.go | 3 + .../x/sys/unix/zerrors_linux_mips.go | 3 + .../x/sys/unix/zerrors_linux_mips64.go | 3 + .../x/sys/unix/zerrors_linux_mips64le.go | 3 + .../x/sys/unix/zerrors_linux_mipsle.go | 3 + .../x/sys/unix/zerrors_linux_ppc.go | 3 + .../x/sys/unix/zerrors_linux_ppc64.go | 3 + .../x/sys/unix/zerrors_linux_ppc64le.go | 3 + .../x/sys/unix/zerrors_linux_riscv64.go | 3 + .../x/sys/unix/zerrors_linux_s390x.go | 3 + .../x/sys/unix/zerrors_linux_sparc64.go | 3 + .../golang.org/x/sys/unix/zsyscall_linux.go | 10 + .../x/sys/unix/zsysnum_linux_386.go | 4 + .../x/sys/unix/zsysnum_linux_amd64.go | 3 + .../x/sys/unix/zsysnum_linux_arm.go | 4 + .../x/sys/unix/zsysnum_linux_arm64.go | 4 + .../x/sys/unix/zsysnum_linux_loong64.go | 4 + .../x/sys/unix/zsysnum_linux_mips.go | 4 + .../x/sys/unix/zsysnum_linux_mips64.go | 4 + .../x/sys/unix/zsysnum_linux_mips64le.go | 4 + .../x/sys/unix/zsysnum_linux_mipsle.go | 4 + .../x/sys/unix/zsysnum_linux_ppc.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 + .../x/sys/unix/zsysnum_linux_riscv64.go | 4 + .../x/sys/unix/zsysnum_linux_s390x.go | 4 + .../x/sys/unix/zsysnum_linux_sparc64.go | 4 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 185 +- .../golang.org/x/sys/windows/env_windows.go | 17 +- .../x/sys/windows/syscall_windows.go | 85 +- .../golang.org/x/sys/windows/types_windows.go | 24 + .../x/sys/windows/zsyscall_windows.go | 126 +- .../encoding/protodelim/protodelim.go | 160 + .../protobuf/encoding/protojson/decode.go | 38 +- .../protobuf/encoding/protojson/doc.go | 2 +- .../protobuf/encoding/protojson/encode.go | 39 +- .../encoding/protojson/well_known_types.go | 59 +- .../protobuf/encoding/prototext/decode.go | 8 +- .../protobuf/encoding/prototext/encode.go | 4 +- .../protobuf/encoding/protowire/wire.go | 28 +- .../protobuf/internal/descfmt/stringer.go | 183 +- .../internal/editiondefaults/defaults.go | 12 + .../editiondefaults/editions_defaults.binpb | 4 + .../protobuf/internal/encoding/json/decode.go | 2 +- .../protobuf/internal/filedesc/desc.go | 102 +- 
.../protobuf/internal/filedesc/desc_init.go | 52 + .../protobuf/internal/filedesc/desc_lazy.go | 28 + .../protobuf/internal/filedesc/editions.go | 142 + .../protobuf/internal/genid/descriptor_gen.go | 364 +- .../internal/genid/go_features_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 5 + .../protobuf/internal/genid/type_gen.go | 38 + .../protobuf/internal/impl/codec_extension.go | 22 +- .../protobuf/internal/impl/codec_gen.go | 113 +- .../protobuf/internal/impl/codec_tables.go | 2 +- .../protobuf/internal/impl/legacy_message.go | 19 +- .../protobuf/internal/impl/message.go | 17 +- .../internal/impl/message_reflect_field.go | 2 +- .../protobuf/internal/impl/pointer_reflect.go | 36 + .../protobuf/internal/impl/pointer_unsafe.go | 40 + .../protobuf/internal/strs/strings.go | 2 +- ...ings_unsafe.go => strings_unsafe_go120.go} | 4 +- .../internal/strs/strings_unsafe_go121.go | 74 + .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 2 +- .../google.golang.org/protobuf/proto/doc.go | 58 +- .../protobuf/proto/encode.go | 2 +- .../protobuf/proto/extension.go | 2 +- .../google.golang.org/protobuf/proto/merge.go | 2 +- .../google.golang.org/protobuf/proto/proto.go | 18 +- .../protobuf/reflect/protodesc/desc.go | 29 +- .../protobuf/reflect/protodesc/desc_init.go | 56 + .../reflect/protodesc/desc_resolve.go | 4 +- .../reflect/protodesc/desc_validate.go | 6 +- .../protobuf/reflect/protodesc/editions.go | 148 + .../protobuf/reflect/protodesc/proto.go | 18 +- .../protobuf/reflect/protoreflect/proto.go | 85 +- .../reflect/protoreflect/source_gen.go | 64 +- .../protobuf/reflect/protoreflect/type.go | 44 +- .../protobuf/reflect/protoreflect/value.go | 24 +- .../reflect/protoreflect/value_equal.go | 8 +- .../reflect/protoreflect/value_union.go | 44 +- ...{value_unsafe.go => value_unsafe_go120.go} | 4 +- .../protoreflect/value_unsafe_go121.go | 87 + .../reflect/protoregistry/registry.go | 24 +- .../types/descriptorpb/descriptor.pb.go | 2475 +++- .../types/gofeaturespb/go_features.pb.go | 177 + .../types/gofeaturespb/go_features.proto | 28 + .../protobuf/types/known/anypb/any.pb.go | 3 +- .../v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../admissionregistration/v1/generated.pb.go | 484 +- .../admissionregistration/v1/generated.proto | 73 + .../api/admissionregistration/v1/types.go | 73 + .../v1/types_swagger_doc_generated.go | 14 +- .../v1/zz_generated.deepcopy.go | 26 + .../v1alpha1/generated.pb.go | 1565 ++- .../v1alpha1/generated.proto | 228 +- .../admissionregistration/v1alpha1/types.go | 217 +- .../v1alpha1/types_swagger_doc_generated.go | 67 +- .../v1alpha1/zz_generated.deepcopy.go | 113 + .../v1beta1/generated.pb.go | 465 +- .../v1beta1/generated.proto | 73 + .../admissionregistration/v1beta1/types.go | 73 + .../v1beta1/types_swagger_doc_generated.go | 14 +- .../v1beta1/zz_generated.deepcopy.go | 26 + .../v1alpha1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/apps/v1/generated.proto | 5 +- vendor/k8s.io/api/apps/v1/types.go | 5 +- .../apps/v1/types_swagger_doc_generated.go | 10 +- .../k8s.io/api/apps/v1beta1/generated.proto | 57 +- vendor/k8s.io/api/apps/v1beta1/types.go | 57 +- .../v1beta1/types_swagger_doc_generated.go | 60 +- .../k8s.io/api/apps/v1beta2/generated.proto | 7 +- vendor/k8s.io/api/apps/v1beta2/types.go | 7 +- .../v1beta2/types_swagger_doc_generated.go | 12 +- .../v1/types_swagger_doc_generated.go | 2 +- .../authentication/v1alpha1/generated.proto | 3 +- .../api/authentication/v1alpha1/types.go 
| 5 +- .../v1alpha1/types_swagger_doc_generated.go | 4 +- .../zz_generated.prerelease-lifecycle.go | 6 +- .../authentication/v1beta1/generated.pb.go | 476 +- .../authentication/v1beta1/generated.proto | 21 + .../api/authentication/v1beta1/register.go | 1 + .../api/authentication/v1beta1/types.go | 27 + .../v1beta1/types_swagger_doc_generated.go | 21 +- .../v1beta1/zz_generated.deepcopy.go | 44 + .../zz_generated.prerelease-lifecycle.go | 18 + .../v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/autoscaling/v1/generated.proto | 42 +- vendor/k8s.io/api/autoscaling/v1/types.go | 79 +- .../v1/types_swagger_doc_generated.go | 42 +- .../k8s.io/api/autoscaling/v2/generated.proto | 20 +- vendor/k8s.io/api/autoscaling/v2/types.go | 61 +- .../v2/types_swagger_doc_generated.go | 22 +- .../api/autoscaling/v2beta1/generated.proto | 2 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 2 +- .../v2beta1/types_swagger_doc_generated.go | 4 +- .../api/autoscaling/v2beta2/generated.proto | 24 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 62 +- .../v2beta2/types_swagger_doc_generated.go | 26 +- vendor/k8s.io/api/batch/v1/generated.proto | 28 +- vendor/k8s.io/api/batch/v1/types.go | 42 +- .../batch/v1/types_swagger_doc_generated.go | 28 +- .../k8s.io/api/batch/v1beta1/generated.pb.go | 317 +- .../k8s.io/api/batch/v1beta1/generated.proto | 15 +- vendor/k8s.io/api/batch/v1beta1/register.go | 1 - vendor/k8s.io/api/batch/v1beta1/types.go | 20 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../batch/v1beta1/zz_generated.deepcopy.go | 27 - .../zz_generated.prerelease-lifecycle.go | 18 - vendor/k8s.io/api/certificates/v1/types.go | 3 +- .../v1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/certificates/v1alpha1/doc.go | 24 + .../api/certificates/v1alpha1/generated.pb.go | 831 ++ .../api/certificates/v1alpha1/generated.proto | 103 + .../api/certificates/v1alpha1/register.go | 61 + .../k8s.io/api/certificates/v1alpha1/types.go | 106 + .../v1alpha1/types_swagger_doc_generated.go | 60 + .../v1alpha1/zz_generated.deepcopy.go | 102 + .../zz_generated.prerelease-lifecycle.go | 58 + .../api/certificates/v1beta1/generated.proto | 6 +- .../k8s.io/api/certificates/v1beta1/types.go | 9 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../api/coordination/v1/generated.proto | 6 +- vendor/k8s.io/api/coordination/v1/types.go | 6 +- .../v1/types_swagger_doc_generated.go | 8 +- .../api/coordination/v1beta1/generated.proto | 6 +- .../k8s.io/api/coordination/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 8 +- .../api/core/v1/annotation_key_constants.go | 21 +- vendor/k8s.io/api/core/v1/generated.pb.go | 2800 ++-- vendor/k8s.io/api/core/v1/generated.proto | 145 +- vendor/k8s.io/api/core/v1/toleration.go | 14 +- vendor/k8s.io/api/core/v1/types.go | 214 +- .../core/v1/types_swagger_doc_generated.go | 65 +- .../api/core/v1/zz_generated.deepcopy.go | 40 +- .../k8s.io/api/discovery/v1/generated.proto | 26 +- vendor/k8s.io/api/discovery/v1/types.go | 42 +- .../v1/types_swagger_doc_generated.go | 14 +- .../api/discovery/v1beta1/generated.proto | 13 +- vendor/k8s.io/api/discovery/v1beta1/types.go | 30 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- .../events/v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 11046 +++++----------- .../api/extensions/v1beta1/generated.proto | 289 +- .../k8s.io/api/extensions/v1beta1/register.go | 2 - 
vendor/k8s.io/api/extensions/v1beta1/types.go | 385 +- .../v1beta1/types_swagger_doc_generated.go | 164 +- .../v1beta1/zz_generated.deepcopy.go | 366 - .../zz_generated.prerelease-lifecycle.go | 48 - .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../v1beta2/types_swagger_doc_generated.go | 2 +- .../v1beta3/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/networking/v1/generated.proto | 176 +- vendor/k8s.io/api/networking/v1/types.go | 188 +- .../v1/types_swagger_doc_generated.go | 120 +- .../api/networking/v1alpha1/generated.pb.go | 1199 +- .../api/networking/v1alpha1/generated.proto | 79 +- .../api/networking/v1alpha1/register.go | 12 +- .../k8s.io/api/networking/v1alpha1/types.go | 86 +- .../v1alpha1/types_swagger_doc_generated.go | 56 +- .../networking/v1alpha1/well_known_labels.go | 33 + .../v1alpha1/zz_generated.deepcopy.go | 97 + .../zz_generated.prerelease-lifecycle.go | 36 + .../api/networking/v1beta1/generated.proto | 78 +- vendor/k8s.io/api/networking/v1beta1/types.go | 87 +- .../v1beta1/types_swagger_doc_generated.go | 72 +- vendor/k8s.io/api/node/v1/generated.proto | 10 +- vendor/k8s.io/api/node/v1/types.go | 12 +- .../node/v1/types_swagger_doc_generated.go | 12 +- .../k8s.io/api/node/v1alpha1/generated.proto | 14 +- vendor/k8s.io/api/node/v1alpha1/types.go | 16 +- .../v1alpha1/types_swagger_doc_generated.go | 14 +- .../k8s.io/api/node/v1beta1/generated.proto | 12 +- vendor/k8s.io/api/node/v1beta1/types.go | 14 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- vendor/k8s.io/api/policy/v1/generated.proto | 4 +- vendor/k8s.io/api/policy/v1/types.go | 4 +- .../policy/v1/types_swagger_doc_generated.go | 4 +- .../k8s.io/api/policy/v1beta1/generated.proto | 4 +- vendor/k8s.io/api/policy/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../rbac/v1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../resource/{v1alpha1 => v1alpha2}/doc.go | 4 +- .../{v1alpha1 => v1alpha2}/generated.pb.go | 658 +- .../{v1alpha1 => v1alpha2}/generated.proto | 84 +- .../{v1alpha1 => v1alpha2}/register.go | 8 +- .../resource/{v1alpha1 => v1alpha2}/types.go | 92 +- .../types_swagger_doc_generated.go | 56 +- .../zz_generated.deepcopy.go | 61 +- .../k8s.io/api/scheduling/v1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1/types.go | 4 +- .../v1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1alpha1/generated.proto | 4 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 4 +- .../v1alpha1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1beta1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- vendor/k8s.io/api/storage/v1/generated.proto | 126 +- vendor/k8s.io/api/storage/v1/types.go | 132 +- .../storage/v1/types_swagger_doc_generated.go | 82 +- .../api/storage/v1alpha1/generated.proto | 38 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 41 +- .../v1alpha1/types_swagger_doc_generated.go | 38 +- .../api/storage/v1beta1/generated.proto | 112 +- vendor/k8s.io/api/storage/v1beta1/types.go | 119 +- .../v1beta1/types_swagger_doc_generated.go | 78 +- .../apis/apiextensions/types_jsonschema.go | 12 + .../pkg/apis/apiextensions/v1/generated.pb.go | 401 +- .../pkg/apis/apiextensions/v1/generated.proto | 19 +- .../pkg/apis/apiextensions/v1/types.go | 6 +- .../apis/apiextensions/v1/types_jsonschema.go | 14 +- .../v1/zz_generated.conversion.go | 2 + 
.../apiextensions/v1beta1/generated.pb.go | 430 +- .../apiextensions/v1beta1/generated.proto | 13 + .../apiextensions/v1beta1/types_jsonschema.go | 14 +- .../v1beta1/zz_generated.conversion.go | 2 + .../v1/customresourcecolumndefinition.go | 84 + .../v1/customresourceconversion.go | 52 + .../v1/customresourcedefinition.go | 218 + .../v1/customresourcedefinitioncondition.go | 80 + .../v1/customresourcedefinitionnames.go | 88 + .../v1/customresourcedefinitionspec.go | 93 + .../v1/customresourcedefinitionstatus.go | 64 + .../v1/customresourcedefinitionversion.go | 107 + .../v1/customresourcesubresources.go | 52 + .../v1/customresourcesubresourcescale.go | 57 + .../v1/customresourcevalidation.go | 39 + .../apiextensions/v1/externaldocumentation.go | 48 + .../apiextensions/v1/jsonschemaprops.go | 463 + .../apiextensions/v1/servicereference.go | 66 + .../apiextensions/v1/validationrule.go | 57 + .../apiextensions/v1/webhookclientconfig.go | 59 + .../apiextensions/v1/webhookconversion.go | 50 + .../v1beta1/customresourcecolumndefinition.go | 84 + .../v1beta1/customresourceconversion.go | 63 + .../v1beta1/customresourcedefinition.go | 218 + .../customresourcedefinitioncondition.go | 80 + .../v1beta1/customresourcedefinitionnames.go | 88 + .../v1beta1/customresourcedefinitionspec.go | 134 + .../v1beta1/customresourcedefinitionstatus.go | 64 + .../customresourcedefinitionversion.go | 107 + .../v1beta1/customresourcesubresources.go | 52 + .../v1beta1/customresourcesubresourcescale.go | 57 + .../v1beta1/customresourcevalidation.go | 39 + .../v1beta1/externaldocumentation.go | 48 + .../apiextensions/v1beta1/jsonschemaprops.go | 463 + .../apiextensions/v1beta1/servicereference.go | 66 + .../apiextensions/v1beta1/validationrule.go | 57 + .../v1beta1/webhookclientconfig.go | 59 + .../v1/customresourcedefinition.go | 59 + .../v1beta1/customresourcedefinition.go | 59 + .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 +- .../pkg/apis/meta/internalversion/defaults.go | 38 + .../pkg/apis/meta/internalversion/types.go | 25 + .../zz_generated.conversion.go | 2 + .../internalversion/zz_generated.deepcopy.go | 5 + .../pkg/apis/meta/v1/generated.pb.go | 385 +- .../pkg/apis/meta/v1/generated.proto | 69 +- .../apimachinery/pkg/apis/meta/v1/types.go | 69 +- .../meta/v1/types_swagger_doc_generated.go | 25 +- .../apis/meta/v1/zz_generated.conversion.go | 7 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 5 + .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 2 + .../apimachinery/pkg/labels/selector.go | 131 +- .../pkg/runtime/schema/group_version.go | 6 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 3 +- .../pkg/runtime/serializer/codec_factory.go | 3 +- .../serializer/versioning/versioning.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 2 +- .../apimachinery/pkg/types/namespacedname.go | 11 + .../apimachinery/pkg/util/errors/errors.go | 2 +- .../apimachinery/pkg/util/framer/framer.go | 2 +- .../pkg/util/managedfields/endpoints.yaml | 7018 ++++++++++ .../pkg/util/managedfields/fieldmanager.go | 57 + .../managedfields/internal/atmostevery.go | 60 + .../internal/buildmanagerinfo.go | 74 + .../managedfields/internal/capmanagers.go | 133 + .../util/managedfields/internal/conflict.go | 89 + .../managedfields/internal/fieldmanager.go | 206 + .../pkg/util/managedfields/internal/fields.go | 47 + .../managedfields/internal/lastapplied.go | 50 + .../internal/lastappliedmanager.go | 171 + .../internal/lastappliedupdater.go | 102 + 
.../managedfields/internal/managedfields.go | 248 + .../internal/managedfieldsupdater.go | 82 + .../util/managedfields/internal/manager.go | 52 + .../managedfields/internal/pathelement.go | 140 + .../managedfields/internal/skipnonapplied.go | 91 + .../util/managedfields/internal/stripmeta.go | 90 + .../managedfields/internal/structuredmerge.go | 186 + .../managedfields/internal/typeconverter.go | 193 + .../internal/versionconverter.go | 123 + .../pkg/util/managedfields/node.yaml | 261 + .../pkg/util/managedfields/pod.yaml | 121 + .../pkg/util/managedfields/scalehandler.go | 174 + .../pkg/util/managedfields/typeconverter.go | 47 + .../apimachinery/pkg/util/mergepatch/util.go | 3 +- .../apimachinery/pkg/util/runtime/runtime.go | 15 +- .../k8s.io/apimachinery/pkg/util/sets/set.go | 14 + .../pkg/util/strategicpatch/OWNERS | 1 + .../pkg/util/strategicpatch/patch.go | 2 +- .../pkg/util/validation/validation.go | 8 +- .../apimachinery/pkg/util/wait/backoff.go | 502 + .../apimachinery/pkg/util/wait/delay.go | 51 + .../apimachinery/pkg/util/wait/error.go | 96 + .../k8s.io/apimachinery/pkg/util/wait/loop.go | 97 + .../k8s.io/apimachinery/pkg/util/wait/poll.go | 315 + .../apimachinery/pkg/util/wait/timer.go | 121 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 634 +- .../v1/matchcondition.go | 48 + .../v1/mutatingwebhook.go | 14 + .../v1/validatingwebhook.go | 14 + .../v1alpha1/admissionpolicyspec.go | 75 - .../v1alpha1/auditannotation.go | 48 + .../v1alpha1/expressionwarning.go | 48 + .../{paramsource.go => matchcondition.go} | 30 +- .../admissionregistration/v1alpha1/rule.go | 76 - .../v1alpha1/rulewithoperations.go | 85 - .../v1alpha1/typechecking.go | 44 + .../v1alpha1/validatingadmissionpolicy.go | 11 +- .../validatingadmissionpolicybindingspec.go | 21 +- .../v1alpha1/validatingadmissionpolicyspec.go | 28 + .../validatingadmissionpolicystatus.go | 66 + .../v1alpha1/validation.go | 15 +- .../v1beta1/matchcondition.go | 48 + .../v1beta1/mutatingwebhook.go | 14 + .../admissionregistration/v1beta1/rule.go | 76 - .../v1beta1/rulewithoperations.go | 85 - .../v1beta1/validatingwebhook.go | 14 + .../autoscaling/v2/podresourcemetricsource.go | 52 - .../v1alpha1/clustertrustbundle.go} | 95 +- .../v1alpha1/clustertrustbundlespec.go | 48 + .../applyconfigurations/core/v1/container.go | 58 +- .../core/v1/containerresizepolicy.go | 52 + .../core/v1/containerstatus.go | 40 +- .../core/v1/ephemeralcontainer.go | 13 + .../core/v1/ephemeralcontainercommon.go | 58 +- .../applyconfigurations/core/v1/podstatus.go | 9 + .../core/v1/servicespec.go | 8 +- .../extensions/v1beta1/allowedcsidriver.go | 39 - .../extensions/v1beta1/allowedflexvolume.go | 39 - .../extensions/v1beta1/allowedhostpath.go | 48 - .../v1beta1/fsgroupstrategyoptions.go | 57 - .../extensions/v1beta1/hostportrange.go | 48 - .../extensions/v1beta1/idrange.go | 48 - .../v1beta1/podsecuritypolicyspec.go | 285 - .../v1beta1/runasgroupstrategyoptions.go | 57 - .../v1beta1/runasuserstrategyoptions.go | 57 - .../v1beta1/runtimeclassstrategyoptions.go | 50 - .../v1beta1/selinuxstrategyoptions.go | 53 - .../supplementalgroupsstrategyoptions.go | 57 - .../applyconfigurations/internal/internal.go | 554 +- .../meta/v1/groupversionkind.go | 57 - .../applyconfigurations/meta/v1/listmeta.go | 66 - .../applyconfigurations/meta/v1/status.go | 142 - .../meta/v1/statuscause.go | 61 - .../meta/v1/statusdetails.go | 93 - .../v1alpha1/ipaddress.go} | 86 +- .../networking/v1alpha1/ipaddressspec.go | 39 + .../networking/v1alpha1/parentreference.go | 79 + 
.../allocationresult.go | 19 +- .../resource/v1alpha2/podschedulingcontext.go | 258 + .../podschedulingcontextspec.go} | 16 +- .../podschedulingcontextstatus.go} | 14 +- .../{v1alpha1 => v1alpha2}/resourceclaim.go | 16 +- .../resourceclaimconsumerreference.go | 2 +- .../resourceclaimparametersreference.go | 2 +- .../resourceclaimschedulingstatus.go | 2 +- .../resourceclaimspec.go | 8 +- .../resourceclaimstatus.go | 2 +- .../resourceclaimtemplate.go | 16 +- .../resourceclaimtemplatespec.go | 2 +- .../{v1alpha1 => v1alpha2}/resourceclass.go | 16 +- .../resourceclassparametersreference.go | 2 +- .../resource/v1alpha2/resourcehandle.go | 48 + .../client-go/discovery/discovery_client.go | 46 +- .../k8s.io/client-go/kubernetes/clientset.go | 29 +- vendor/k8s.io/client-go/kubernetes/doc.go | 7 +- .../client-go/kubernetes/scheme/register.go | 6 +- .../v1alpha1/validatingadmissionpolicy.go | 46 + .../v1beta1/authentication_client.go | 5 + .../v1beta1/generated_expansion.go | 2 + .../v1beta1/selfsubjectreview.go | 64 + .../v1alpha1/certificates_client.go | 107 + .../v1alpha1/clustertrustbundle.go | 197 + .../v1alpha1/doc.go | 0 .../v1alpha1/generated_expansion.go} | 5 +- .../typed/events/v1beta1/event_expansion.go | 3 +- .../extensions/v1beta1/extensions_client.go | 5 - .../extensions/v1beta1/generated_expansion.go | 2 - .../extensions/v1beta1/podsecuritypolicy.go | 197 - .../v1alpha1/generated_expansion.go | 2 + .../typed/networking/v1alpha1/ipaddress.go | 197 + .../networking/v1alpha1/networking_client.go | 5 + .../typed/resource/v1alpha1/podscheduling.go | 256 - .../kubernetes/typed/resource/v1alpha2/doc.go | 20 + .../generated_expansion.go | 4 +- .../resource/v1alpha2/podschedulingcontext.go | 256 + .../{v1alpha1 => v1alpha2}/resource_client.go | 46 +- .../{v1alpha1 => v1alpha2}/resourceclaim.go | 56 +- .../resourceclaimtemplate.go | 44 +- .../{v1alpha1 => v1alpha2}/resourceclass.go | 44 +- vendor/k8s.io/client-go/openapi/OWNERS | 4 + vendor/k8s.io/client-go/pkg/version/base.go | 3 +- vendor/k8s.io/client-go/rest/client.go | 3 +- vendor/k8s.io/client-go/rest/request.go | 45 +- vendor/k8s.io/client-go/rest/with_retry.go | 18 +- .../client-go/tools/cache/controller.go | 72 +- .../client-go/tools/cache/delta_fifo.go | 7 +- vendor/k8s.io/client-go/tools/cache/fifo.go | 14 +- .../k8s.io/client-go/tools/cache/reflector.go | 477 +- .../client-go/tools/cache/shared_informer.go | 141 +- .../client-go/tools/cache/synctrack/lazy.go | 83 + .../tools/cache/synctrack/synctrack.go | 120 + .../tools/leaderelection/leaderelection.go | 15 +- .../leaderelection/resourcelock/interface.go | 2 +- .../leaderelection/resourcelock/leaselock.go | 8 +- .../k8s.io/client-go/tools/metrics/metrics.go | 17 + vendor/k8s.io/client-go/tools/record/event.go | 53 +- vendor/k8s.io/client-go/tools/record/fake.go | 23 +- vendor/k8s.io/client-go/util/cert/cert.go | 2 +- .../util/workqueue/delaying_queue.go | 61 +- .../client-go/util/workqueue/metrics.go | 9 +- .../k8s.io/client-go/util/workqueue/queue.go | 53 +- .../util/workqueue/rate_limiting_queue.go | 61 +- vendor/k8s.io/component-base/config/types.go | 2 +- .../component-base/config/v1alpha1/types.go | 2 +- vendor/k8s.io/klog/v2/format.go | 65 + .../klog/v2/internal/serialize/keyvalues.go | 47 +- vendor/k8s.io/klog/v2/k8s_references.go | 12 +- vendor/k8s.io/klog/v2/klog.go | 13 + .../k8s.io/kube-openapi/pkg/cached/cache.go | 21 +- vendor/k8s.io/utils/trace/trace.go | 19 + vendor/knative.dev/pkg/apis/OWNERS | 14 +- vendor/knative.dev/pkg/apis/duck/OWNERS | 6 +- 
.../pkg/apis/duck/v1/addressable_types.go | 21 +- .../pkg/apis/duck/v1/destination.go | 11 + .../pkg/apis/duck/v1/knative_reference.go | 4 + .../pkg/apis/duck/v1/zz_generated.deepcopy.go | 29 +- vendor/knative.dev/pkg/configmap/OWNERS | 6 - vendor/knative.dev/pkg/kmeta/OWNERS | 5 - vendor/knative.dev/pkg/logging/OWNERS | 6 - vendor/knative.dev/pkg/metrics/OWNERS | 10 - vendor/knative.dev/pkg/metrics/config.go | 22 +- .../pkg/metrics/config_observability.go | 35 + .../pkg/metrics/opencensus_exporter.go | 2 +- .../pkg/metrics/prometheus_exporter.go | 7 +- vendor/modules.txt | 104 +- .../controller-runtime/.golangci.yml | 224 +- .../sigs.k8s.io/controller-runtime/Makefile | 14 +- .../controller-runtime/OWNERS_ALIASES | 10 +- .../sigs.k8s.io/controller-runtime/README.md | 6 +- .../sigs.k8s.io/controller-runtime/RELEASE.md | 2 +- .../controller-runtime/SECURITY_CONTACTS | 5 +- .../sigs.k8s.io/controller-runtime/alias.go | 4 +- vendor/sigs.k8s.io/controller-runtime/doc.go | 4 +- .../pkg/builder/controller.go | 88 +- .../controller-runtime/pkg/builder/options.go | 16 + .../controller-runtime/pkg/builder/webhook.go | 63 +- .../controller-runtime/pkg/cache/cache.go | 481 +- .../pkg/cache/informer_cache.go | 84 +- .../pkg/cache/internal/cache_reader.go | 4 +- .../pkg/cache/internal/deleg_map.go | 126 - .../pkg/cache/internal/disabledeepcopy.go | 35 - .../pkg/cache/internal/informers.go | 560 + .../pkg/cache/internal/informers_map.go | 480 - .../pkg/cache/internal/selector.go | 15 - .../pkg/cache/internal/transformers.go | 8 +- .../pkg/cache/multi_namespace_cache.go | 96 +- .../pkg/certwatcher/certwatcher.go | 54 +- .../pkg/client/apiutil/apimachinery.go | 66 +- .../pkg/client/apiutil/dynamicrestmapper.go | 301 - .../{lazyrestmapper.go => restmapper.go} | 165 +- .../controller-runtime/pkg/client/client.go | 187 +- ...ient_cache.go => client_rest_resources.go} | 27 +- .../pkg/client/config/config.go | 6 +- .../controller-runtime/pkg/client/doc.go | 3 +- .../controller-runtime/pkg/client/dryrun.go | 11 + .../pkg/client/interfaces.go | 5 + .../pkg/client/namespaced_client.go | 33 +- .../controller-runtime/pkg/client/options.go | 30 +- .../controller-runtime/pkg/client/split.go | 143 - .../pkg/client/typed_client.go | 26 +- .../pkg/client/unstructured_client.go | 79 +- .../controller-runtime/pkg/client/watch.go | 30 +- .../controller-runtime/pkg/cluster/cluster.go | 196 +- .../pkg/cluster/internal.go | 41 +- .../controller-runtime/pkg/config/config.go | 12 +- .../pkg/config/controller.go | 49 + .../controller-runtime/pkg/config/doc.go | 10 +- .../pkg/config/v1alpha1/doc.go | 2 + .../pkg/config/v1alpha1/register.go | 6 + .../pkg/config/v1alpha1/types.go | 19 +- .../pkg/controller/controller.go | 43 +- .../controller-runtime/pkg/envtest/crd.go | 4 +- .../controller-runtime/pkg/envtest/server.go | 24 +- .../controller-runtime/pkg/envtest/webhook.go | 16 +- .../controller-runtime/pkg/handler/enqueue.go | 10 +- .../pkg/handler/enqueue_mapped.go | 37 +- .../pkg/handler/enqueue_owner.go | 86 +- .../pkg/handler/eventhandler.go | 34 +- .../pkg/internal/controller/controller.go | 43 +- .../internal/controller/metrics/metrics.go | 8 + .../pkg/internal/objectutil/objectutil.go | 78 - .../pkg/internal/recorder/recorder.go | 9 +- .../source/event_handler.go} | 58 +- .../pkg/internal/source/kind.go | 117 + .../controller-runtime/pkg/log/deleg.go | 40 +- .../controller-runtime/pkg/log/log.go | 46 +- .../pkg/log/zap/kube_helpers.go | 22 - .../pkg/manager/internal.go | 120 +- .../controller-runtime/pkg/manager/manager.go 
| 185 +- .../pkg/manager/runnable_group.go | 2 +- .../controller-runtime/pkg/manager/server.go | 61 + .../pkg/metrics/client_go_adapter.go | 89 +- .../pkg/predicate/predicate.go | 23 - .../pkg/reconcile/reconcile.go | 28 + .../pkg/runtime/inject/doc.go | 22 - .../pkg/runtime/inject/inject.go | 164 - .../controller-runtime/pkg/source/source.go | 183 +- .../pkg/webhook/admission/decode.go | 17 +- .../pkg/webhook/admission/defaulter.go | 15 +- .../pkg/webhook/admission/defaulter_custom.go | 14 +- .../pkg/webhook/admission/doc.go | 6 - .../pkg/webhook/admission/http.go | 20 +- .../pkg/webhook/admission/inject.go | 31 - .../pkg/webhook/admission/multi.go | 52 - .../pkg/webhook/admission/response.go | 23 +- .../pkg/webhook/admission/validator.go | 105 +- .../pkg/webhook/admission/validator_custom.go | 53 +- .../pkg/webhook/admission/webhook.go | 119 +- .../pkg/webhook/conversion/conversion.go | 34 +- .../pkg/webhook/conversion/decoder.go | 7 +- .../controller-runtime/pkg/webhook/server.go | 221 +- .../v4/merge/conflict.go | 121 + .../structured-merge-diff/v4/merge/update.go | 356 + 748 files changed, 45896 insertions(+), 23311 deletions(-) create mode 100644 vendor/github.com/cloudflare/circl/math/primes.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go rename vendor/github.com/{matttproud/golang_protobuf_extensions/pbutil/doc.go => prometheus/client_golang/prometheus/process_collector_wasip1.go} (63%) create mode 100644 vendor/github.com/prometheus/common/model/metadata.go delete mode 100644 vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/build_conversion.go delete mode 100644 vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildrun_conversion.go delete mode 100644 vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildstrategy_conversion.go delete mode 100644 vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/clusterbuildstrategy_conversion.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/events.go rename vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/{storagetypes.go => constant.go} (67%) rename vendor/go.uber.org/zap/{LICENSE.txt => LICENSE} (100%) create mode 100644 vendor/golang.org/x/net/http2/testsync.go create mode 100644 vendor/golang.org/x/oauth2/deviceauth.go create mode 100644 vendor/golang.org/x/oauth2/pkce.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/editions.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe.go => strings_unsafe_go120.go} (96%) create mode 100644 
vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions.go rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe.go => value_unsafe_go120.go} (97%) create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/doc.go (84%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/generated.pb.go (83%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/generated.proto (79%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/register.go (95%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/types.go (81%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/types_swagger_doc_generated.go (76%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/zz_generated.deepcopy.go (88%) create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go 
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/delay.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/error.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/loop.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/poll.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/timer.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go rename vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/{paramsource.go => matchcondition.go} (50%) delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go rename vendor/k8s.io/client-go/applyconfigurations/{resource/v1alpha1/podscheduling.go => certificates/v1alpha1/clustertrustbundle.go} (64%) create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/podsecuritypolicy.go => networking/v1alpha1/ipaddress.go} (66%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/allocationresult.go (76%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1/podschedulingspec.go => v1alpha2/podschedulingcontextspec.go} (67%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1/podschedulingstatus.go => v1alpha2/podschedulingcontextstatus.go} (64%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaim.go (96%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimconsumerreference.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimparametersreference.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimschedulingstatus.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimspec.go (93%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimstatus.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimtemplate.go (96%) rename 
vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimtemplatespec.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclass.go (96%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclassparametersreference.go (99%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go rename vendor/k8s.io/client-go/kubernetes/typed/{resource => certificates}/v1alpha1/doc.go (100%) rename vendor/k8s.io/{apiextensions-apiserver/pkg/client/clientset/clientset/doc.go => client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go} (88%) delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha1 => v1alpha2}/generated_expansion.go (92%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha1 => v1alpha2}/resource_client.go (66%) rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha1 => v1alpha2}/resourceclaim.go (79%) rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha1 => v1alpha2}/resourceclaimtemplate.go (80%) rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha1 => v1alpha2}/resourceclass.go (80%) create mode 100644 vendor/k8s.io/client-go/openapi/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go create mode 100644 vendor/k8s.io/klog/v2/format.go delete mode 100644 vendor/knative.dev/pkg/configmap/OWNERS delete mode 100644 vendor/knative.dev/pkg/kmeta/OWNERS delete mode 100644 vendor/knative.dev/pkg/logging/OWNERS delete mode 100644 vendor/knative.dev/pkg/metrics/OWNERS delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go rename vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/{lazyrestmapper.go => restmapper.go} (57%) rename vendor/sigs.k8s.io/controller-runtime/pkg/client/{client_cache.go => client_rest_resources.go} (82%) delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go rename vendor/sigs.k8s.io/controller-runtime/pkg/{source/internal/eventsource.go => 
internal/source/event_handler.go} (67%)
 create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go
 create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go

diff --git a/go.mod b/go.mod
index ea8dfaa1..e5609f86 100644
--- a/go.mod
+++ b/go.mod
@@ -5,17 +5,17 @@ go 1.21
 require (
 	github.com/go-logr/logr v1.4.1
 	github.com/google/go-github/v53 v53.2.0
-	github.com/onsi/ginkgo/v2 v2.16.0
-	github.com/onsi/gomega v1.31.1
-	github.com/shipwright-io/build v0.12.0
-	github.com/tektoncd/pipeline v0.47.4
-	k8s.io/api v0.26.9
-	k8s.io/apimachinery v0.26.9
-	k8s.io/apiserver v0.26.9
-	k8s.io/client-go v0.26.9
-	k8s.io/utils v0.0.0-20230209194617-a36077c30491
-	knative.dev/pkg v0.0.0-20230221145627-8efb3485adcf
-	sigs.k8s.io/controller-runtime v0.14.6
+	github.com/onsi/ginkgo/v2 v2.17.1
+	github.com/onsi/gomega v1.32.0
+	github.com/shipwright-io/build v0.13.0
+	github.com/tektoncd/pipeline v0.50.5
+	k8s.io/api v0.27.11
+	k8s.io/apimachinery v0.27.11
+	k8s.io/apiserver v0.27.11
+	k8s.io/client-go v0.27.11
+	k8s.io/utils v0.0.0-20230505201702-9f6742963106
+	knative.dev/pkg v0.0.0-20231011201526-df28feae6d34
+	sigs.k8s.io/controller-runtime v0.15.3
 )
 
 require (
@@ -26,7 +26,7 @@ require (
 	github.com/blendle/zapdriver v1.3.1 // indirect
 	github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/cloudflare/circl v1.3.3 // indirect
+	github.com/cloudflare/circl v1.3.7 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/emicklei/go-restful/v3 v3.10.2 // indirect
 	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
@@ -43,7 +43,7 @@ require (
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/gnostic v0.6.9 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
-	github.com/google/go-containerregistry v0.16.1 // indirect
+	github.com/google/go-containerregistry v0.19.1 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20230728192033-2ba5b33183c6 // indirect
@@ -55,29 +55,28 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_golang v1.17.0 // indirect
-	github.com/prometheus/client_model v0.5.0 // indirect
-	github.com/prometheus/common v0.44.0 // indirect
-	github.com/prometheus/procfs v0.11.1 // indirect
+	github.com/prometheus/client_golang v1.19.0 // indirect
+	github.com/prometheus/client_model v0.6.0 // indirect
+	github.com/prometheus/common v0.48.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/prometheus/statsd_exporter v0.24.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.24.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.18.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.22.0 // indirect golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/term v0.16.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.17.0 // indirect @@ -87,14 +86,14 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e // indirect google.golang.org/grpc v1.58.3 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.26.9 // indirect - k8s.io/component-base v0.26.9 // indirect - k8s.io/klog/v2 v2.90.1 // indirect - k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect + k8s.io/apiextensions-apiserver v0.27.11 // indirect + k8s.io/component-base v0.27.11 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/go.sum b/go.sum index cacca82e..68d843b4 100644 --- a/go.sum +++ b/go.sum @@ -70,8 +70,9 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudevents/sdk-go/v2 v2.14.0 h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s= github.com/cloudevents/sdk-go/v2 v2.14.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To= -github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -182,8 +183,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= -github.com/google/go-containerregistry v0.16.1/go.mod 
h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY= +github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= github.com/google/go-github/v53 v53.2.0 h1:wvz3FyF53v4BK+AsnvCmeNhf8AkTaeh2SoYu/XUvTtI= github.com/google/go-github/v53 v53.2.0/go.mod h1:XhFRObz+m/l+UCm9b7KSIC3lT3NWSXGt7mOsAWEloao= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -256,8 +257,6 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -269,10 +268,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM= -github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= -github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= -github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -288,39 +287,39 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.19.0 
h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/prometheus/statsd_exporter v0.24.0 h1:aZmN6CzS2H1Non1JKZdjkQlAkDtGoQBYIESk2SlU1OI= github.com/prometheus/statsd_exporter v0.24.0/go.mod h1:+dQiRTqn9DnPmN5mI5Xond+k8nuRKzdgh1omxh9OgFY= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/shipwright-io/build v0.12.0 
h1:uK3nLHfyMMJzsId3B0Ta4frufYRnlHDcm/BNurt2DvI= -github.com/shipwright-io/build v0.12.0/go.mod h1:VtpWnqEi98H6GHRiPh2DK5f6uzZnPuYRhTm6AO1cuTo= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/shipwright-io/build v0.13.0 h1:UBap+Mk6P0fji/sLc3eBmDlAPWvZWE4KdSQiVUkwYEU= +github.com/shipwright-io/build v0.13.0/go.mod h1:vBVRQYgbriB42LLbtLuCALTT93PWqJZI9hC1+TqOegg= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -344,8 +343,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= -github.com/tektoncd/pipeline v0.47.4 h1:MWc/Et/kil7BwJGs1MlJ2sJaijr4USxw7Mp9Q7K21x8= -github.com/tektoncd/pipeline v0.47.4/go.mod h1:7H1DeNuEJFGoExGwQTlRul2IziCPxkjXRdDdirWmoQs= +github.com/tektoncd/pipeline v0.50.5 h1:+wvALTyl/4M9es4K5aiZ5TAeWWoMGXBo4ppMnleQg4Y= +github.com/tektoncd/pipeline v0.50.5/go.mod h1:33ZU30CR8Pbr6Pb4l7+Tz1oPGsJBY5yxyG8Z+ejGO0w= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -369,16 +368,16 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -388,8 +387,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -468,8 +467,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -477,8 +476,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -544,15 +543,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -722,8 +721,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -753,31 +752,31 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.9 h1:s8Y+G1u2JM55b90+Yo2RVb3PGT/hkWNVPN4idPERxJg= -k8s.io/api v0.26.9/go.mod h1:W/W4fEWRVzPD36820LlVUQfNBiSbiq0VPWRFJKwzmUg= -k8s.io/apiextensions-apiserver v0.26.9 h1:aJqWRuBj9i9J6tIDniqUDYM5QCRajTKXK/GO+zEccGQ= -k8s.io/apiextensions-apiserver v0.26.9/go.mod h1:L1uysxOP2kC1vkZTlHGUlUl5WSpa7e4GHJmGEZY7yLg= -k8s.io/apimachinery v0.26.9 h1:5yAV9cFR7Z4gIorKcAjWnx4uxtxiFsERwq4Pvmx0CCg= -k8s.io/apimachinery v0.26.9/go.mod h1:qYzLkrQ9lhrZRh0jNKo2cfvf/R1/kQONnSiyB7NUJU0= -k8s.io/apiserver v0.26.9 h1:G8D5XIXbhLzqdRY3FajzkKE2lt8hnAW5Vjq67mzEeR8= -k8s.io/apiserver v0.26.9/go.mod h1:HY2TzNkDgq71jsNLyk61ZoDrpiyvujdY6kHyT9DwvtU= -k8s.io/client-go v0.26.9 h1:TGWi/6guEjIgT0Hg871Gsmx0qFuoGyGFjlFedrk7It0= -k8s.io/client-go v0.26.9/go.mod 
h1:tU1FZS0bwAmAFyPYpZycUQrQnUMzQ5MHloop7EbX6ow= -k8s.io/component-base v0.26.9 h1:qQVdQgyEIUe8EUkB3EEuQ9l5sgVlG2KgOB519yWEBGw= -k8s.io/component-base v0.26.9/go.mod h1:3WmW9lH9tbjpuvpAc22cPF/6C3VxCjMxkOU1j2mpzr8= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= -k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= -k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -knative.dev/pkg v0.0.0-20230221145627-8efb3485adcf h1:TwvZFDpkyqpK2OCAwvNGV2Zjk14FzIh8X8Ci/du3jYI= -knative.dev/pkg v0.0.0-20230221145627-8efb3485adcf/go.mod h1:VO/fcEsq43seuONRQxZyftWHjpMabYzRHDtpSEQ/eoQ= +k8s.io/api v0.27.11 h1:IsGrWbXt7RkE+arc9GLQPYI5AtZkT+feBMorY+Nzx4I= +k8s.io/api v0.27.11/go.mod h1:SGqTcyqa0e+Db3pgyH6v+por5dO2OdTkKDCdD3Op3Ng= +k8s.io/apiextensions-apiserver v0.27.11 h1:+NZfWpN842yab/O1RRDu8+11eSACA2cPa2VixSxiONM= +k8s.io/apiextensions-apiserver v0.27.11/go.mod h1:BhfSAHizE6MjUiD0FtBxwOakCr8/GLB7Icepd0jk7XU= +k8s.io/apimachinery v0.27.11 h1:ivrKMN7JgdtKhay14S5UQlvilV3z6W+wjiSQTzyr5zc= +k8s.io/apimachinery v0.27.11/go.mod h1:IHu2ovJ60RqxyPSLmTel7KDLdOCRbpOxwtUBmwBnT/E= +k8s.io/apiserver v0.27.11 h1:ztQgOjOlRLeRqii0vWBKmXAPj+NS4RW75sUHk8IobIs= +k8s.io/apiserver v0.27.11/go.mod h1:fQNBsQ2CI0XJz0dD6hLuStPLR9ym6EmUBn5TuUJBsQU= +k8s.io/client-go v0.27.11 h1:SZChXsDaN6lB5IYywCpvQs/ZUa5vK2NHkpEwUhoK3fQ= +k8s.io/client-go v0.27.11/go.mod h1:Rg3Yeuk9sX87gpVunVn3AsvMkGZfXuutTDC/jigBNUo= +k8s.io/component-base v0.27.11 h1:oq1xukCfjOlF6Jpe7/1PMOXhzicW3HqaUi8aaBmvCEM= +k8s.io/component-base v0.27.11/go.mod h1:YHs4U6ETkkvGj7NR44ISaSMiDQrV7nPtUnIenALJicw= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 h1:azYPdzztXxPSa8wb+hksEKayiz0o+PPisO/d+QhWnoo= +k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ= +k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= +k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +knative.dev/pkg v0.0.0-20231011201526-df28feae6d34 h1:H+K37bEBZ2STSWMjCgrdilj38KKZGVxBbob22K99Y50= +knative.dev/pkg v0.0.0-20231011201526-df28feae6d34/go.mod h1:ZRgzFBFmdBsARm6+Pkr9WRG8bXys8rYq64ELfLG6+9w= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA= -sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= +sigs.k8s.io/controller-runtime v0.15.3 h1:L+t5heIaI3zeejoIyyvLQs5vTVu/67IU2FfisVzFlBc= +sigs.k8s.io/controller-runtime v0.15.3/go.mod h1:kp4jckA4vTx281S/0Yk2LFEEQe67mjg+ev/yknv47Ds= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= 
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go index 8cd4e333..83d7cdad 100644 --- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go @@ -9,7 +9,7 @@ import ( fp "github.com/cloudflare/circl/math/fp448" ) -// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogeneous to Goldilocks. +// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogenous to Goldilocks. type twistCurve struct{} // Identity returns the identity point. diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go index ab19d0ad..1755fd1e 100644 --- a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go +++ b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go @@ -6,13 +6,21 @@ package sha3 // KeccakF1600 applies the Keccak permutation to a 1600b-wide // state represented as a slice of 25 uint64s. +// If turbo is true, applies the 12-round variant instead of the +// regular 24-round variant. // nolint:funlen -func KeccakF1600(a *[25]uint64) { +func KeccakF1600(a *[25]uint64, turbo bool) { // Implementation translated from Keccak-inplace.c // in the keccak reference code. var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 - for i := 0; i < 24; i += 4 { + i := 0 + + if turbo { + i = 12 + } + + for ; i < 24; i += 4 { // Combines the 5 steps in each round into 2 steps. // Unrolls 4 rounds per loop and spreads some steps across rounds. diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go index b35cd006..a0df5aa6 100644 --- a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go +++ b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go @@ -51,6 +51,7 @@ type State struct { // Specific to SHA-3 and SHAKE. outputLen int // the default output size in bytes state spongeDirection // whether the sponge is absorbing or squeezing + turbo bool // Whether we're using 12 rounds instead of 24 } // BlockSize returns the rate of sponge underlying this hash function. @@ -86,11 +87,11 @@ func (d *State) permute() { xorIn(d, d.buf()) d.bufe = 0 d.bufo = 0 - KeccakF1600(&d.a) + KeccakF1600(&d.a, d.turbo) case spongeSqueezing: // If we're squeezing, we need to apply the permutation before // copying more output. - KeccakF1600(&d.a) + KeccakF1600(&d.a, d.turbo) d.bufe = d.rate d.bufo = 0 copyOut(d, d.buf()) @@ -136,7 +137,7 @@ func (d *State) Write(p []byte) (written int, err error) { // The fast path; absorb a full "rate" bytes of input and apply the permutation. xorIn(d, p[:d.rate]) p = p[d.rate:] - KeccakF1600(&d.a) + KeccakF1600(&d.a, d.turbo) } else { // The slow path; buffer the input until we can fill the sponge, and then xor it in. todo := d.rate - bufl @@ -193,3 +194,7 @@ func (d *State) Sum(in []byte) []byte { _, _ = dup.Read(hash) return append(in, hash...) 
} + +func (d *State) IsAbsorbing() bool { + return d.state == spongeAbsorbing +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go index b92c5b7d..77817f75 100644 --- a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go +++ b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go @@ -57,6 +57,17 @@ func NewShake128() State { return State{rate: rate128, dsbyte: dsbyteShake} } +// NewTurboShake128 creates a new TurboSHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +// D is the domain separation byte and must be between 0x01 and 0x7f inclusive. +func NewTurboShake128(D byte) State { + if D == 0 || D > 0x7f { + panic("turboshake: D out of range") + } + return State{rate: rate128, dsbyte: D, turbo: true} +} + // NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. // Its generic security strength is 256 bits against all attacks if // at least 64 bytes of its output are used. @@ -64,6 +75,17 @@ func NewShake256() State { return State{rate: rate256, dsbyte: dsbyteShake} } +// NewTurboShake256 creates a new TurboSHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +// D is the domain separation byte and must be between 0x01 and 0x7f inclusive. +func NewTurboShake256(D byte) State { + if D == 0 || D > 0x7f { + panic("turboshake: D out of range") + } + return State{rate: rate256, dsbyte: D, turbo: true} +} + // ShakeSum128 writes an arbitrary-length digest of data into hash. func ShakeSum128(hash, data []byte) { h := NewShake128() @@ -77,3 +99,21 @@ func ShakeSum256(hash, data []byte) { _, _ = h.Write(data) _, _ = h.Read(hash) } + +// TurboShakeSum128 writes an arbitrary-length digest of data into hash. +func TurboShakeSum128(hash, data []byte, D byte) { + h := NewTurboShake128(D) + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +// TurboShakeSum256 writes an arbitrary-length digest of data into hash. +func TurboShakeSum256(hash, data []byte, D byte) { + h := NewTurboShake256(D) + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +func (d *State) SwitchDS(D byte) { + d.dsbyte = D +} diff --git a/vendor/github.com/cloudflare/circl/math/primes.go b/vendor/github.com/cloudflare/circl/math/primes.go new file mode 100644 index 00000000..158fd83a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/primes.go @@ -0,0 +1,34 @@ +package math + +import ( + "crypto/rand" + "io" + "math/big" +) + +// IsSafePrime reports whether p is (probably) a safe prime. +// The prime p=2*q+1 is safe prime if both p and q are primes. +// Note that ProbablyPrime is not suitable for judging primes +// that an adversary may have crafted to fool the test. +func IsSafePrime(p *big.Int) bool { + pdiv2 := new(big.Int).Rsh(p, 1) + return p.ProbablyPrime(20) && pdiv2.ProbablyPrime(20) +} + +// SafePrime returns a number of the given bit length that is a safe prime with high probability. +// The number returned p=2*q+1 is a safe prime if both p and q are primes. +// SafePrime will return error for any error returned by rand.Read or if bits < 2. 
+func SafePrime(random io.Reader, bits int) (*big.Int, error) { + one := big.NewInt(1) + p := new(big.Int) + for { + q, err := rand.Prime(random, bits-1) + if err != nil { + return nil, err + } + p.Lsh(q, 1).Add(p, one) + if p.ProbablyPrime(20) { + return p, nil + } + } +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go index 08ca65d7..2c73c26f 100644 --- a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go @@ -1,7 +1,7 @@ // Package ed25519 implements Ed25519 signature scheme as described in RFC-8032. // // This package provides optimized implementations of the three signature -// variants and maintaining closer compatiblilty with crypto/ed25519. +// variants and maintaining closer compatibility with crypto/ed25519. // // | Scheme Name | Sign Function | Verification | Context | // |-------------|-------------------|---------------|-------------------| diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE deleted file mode 100644 index 8dada3ed..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE deleted file mode 100644 index 5d8cb5b7..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. 
Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore deleted file mode 100644 index e16fb946..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile deleted file mode 100644 index 81be2143..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 258c0636..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... 
- continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 8fb59ad2..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. 
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index b79ee9c5..44222220 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,19 @@ +## 2.17.1 + +### Fixes +- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d] + +## 2.17.0 + +### Features + +- add `--github-output` for nicer output in github actions [e8a2056] + +### Maintenance + +- fix typo in core_dsl.go [977bc6f] +- Fix typo in docs [e297e7b] + ## 2.16.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 0e633f30..a3e8237e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -792,8 +792,8 @@ DeferCleanup can be passed: For example: BeforeEach(func() { - DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) - os.SetEnv("FOO", "BAR") + DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO")) + os.Setenv("FOO", "BAR") }) will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 56b7be75..4026859e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,6 +182,22 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel @@ -283,26 +299,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) } if showSeparateVisibilityAlwaysReportsSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + r.wrapTextBlock("Report Entries", func() { + for _, entry := 
range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) } if showTimeline { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) - r.emitTimeline(1, report, timeline) - r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) } // Emit Failure Message @@ -405,7 +418,11 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if r.conf.GithubOutput { + r.emitBlock(r.fi(indent, "::error file=%s,line=%d::%s %s", failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index c88fc85a..cef273ee 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -89,6 +89,7 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool + GithubOutput bool JSONReport string JUnitReport string @@ -264,7 +265,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite."}, + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. 
By default, ginkgo only randomizes the top level Describe, Context and When containers."}, @@ -331,6 +332,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 9186ae87..de69f302 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,7 +24,8 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string + ExportAs string + AlwaysExport bool } type GinkgoFlags []GinkgoFlag @@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { result := []string{} for _, flag := range flags { @@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" { + if iface.(string) != "" || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 { + if iface.(int64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 { + if iface.(float64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 { + if iface.(int) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) { + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 675f8db2..851d42b4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.16.0" +const VERSION = "2.17.1" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 9a14b815..01ec5245 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,16 @@ +## 1.32.0 + +### Maintenance 
+- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197] + + This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one. + +- chore: test with Go 1.22 (#733) [32ef35e] +- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387] +- Bump github-pages and jekyll-feed in /docs (#732) [b71e477] +- docs: fix typo and broken anchor link to gstruct [f460154] +- docs: fix HaveEach matcher signature [a2862e4] + ## 1.31.1 ### Fixes diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 5b46a165..ffb81b1f 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.31.1" +const GOMEGA_VERSION = "1.32.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 1feba62c..b5c8bcb3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -475,6 +475,9 @@ type HistogramOpts struct { // now is for testing purposes, by default it's time.Now. now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } // HistogramVecOpts bundles the options to create a HistogramVec metric. @@ -526,7 +529,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.now == nil { opts.now = time.Now } - + if opts.afterFunc == nil { + opts.afterFunc = time.AfterFunc + } h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -536,6 +541,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, lastResetTime: opts.now(), now: opts.now, + afterFunc: opts.afterFunc, } if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { h.upperBounds = DefBuckets @@ -716,9 +722,16 @@ type histogram struct { nativeHistogramMinResetDuration time.Duration // lastResetTime is protected by mtx. It is also used as created timestamp. lastResetTime time.Time + // resetScheduled is protected by mtx. It is true if a reset is + // scheduled for a later time (when nativeHistogramMinResetDuration has + // passed). + resetScheduled bool // now is for testing purposes, by default it's time.Now. now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } func (h *histogram) Desc() *Desc { @@ -874,21 +887,31 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { return } + // One of the other strategies will happen. To undo what they will do as + // soon as enough time has passed to satisfy + // h.nativeHistogramMinResetDuration, schedule a reset at the right time + // if we haven't done so already. 
+ if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled { + h.resetScheduled = true + h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset) + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { return } h.doubleBucketWidth(hotCounts, coldCounts) } -// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration -// has been passed. It returns true if the histogram has been reset. The caller -// must have locked h.mtx. +// maybeReset resets the whole histogram if at least +// h.nativeHistogramMinResetDuration has been passed. It returns true if the +// histogram has been reset. The caller must have locked h.mtx. func (h *histogram) maybeReset( hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, ) bool { // We are using the possibly mocked h.now() rather than // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || + if h.nativeHistogramMinResetDuration == 0 || // No reset configured. + h.resetScheduled || // Do not interfere if a reset is already scheduled. + h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { return false } @@ -906,6 +929,29 @@ func (h *histogram) maybeReset( return true } +// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be +// called without having locked h.mtx. +func (h *histogram) reset() { + h.mtx.Lock() + defer h.mtx.Unlock() + + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hot := h.counts[hotIdx] + cold := h.counts[coldIdx] + // Completely reset coldCounts. + h.resetCounts(cold) + // Make coldCounts the new hot counts while resetting countAndHotIdx. + n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too. + h.resetCounts(hot) + h.lastResetTime = h.now() + h.resetScheduled = false +} + // maybeWidenZeroBucket widens the zero bucket until it includes the existing // buckets closest to the zero bucket (which could be two, if an equidistant // negative and a positive bucket exists, but usually it's only one bucket to be diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index b3c4eca2..c21911f2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -165,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { + // The call below makes vals escape, copy them to avoid that. + vals := append([]string(nil), vals...) return fmt.Errorf( "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index c0152cdb..8c1136ce 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License.
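The scheduled-reset machinery above is internal to the client; from user code it is driven entirely through HistogramOpts. Below is a minimal sketch of a native histogram that exercises these code paths (metric name and threshold values are illustrative only, not taken from this patch); the process-collector build-tag changes then continue.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Any NativeHistogram* option switches the histogram to sparse buckets,
	// which is the code path patched above: when the bucket limit is hit, the
	// client tries a reset, schedules one via afterFunc if it is too early,
	// widens the zero bucket, or doubles the bucket width.
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "demo_request_duration_seconds", // illustrative name
		Help:                            "Example native histogram.",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	prometheus.MustRegister(h)
	h.Observe(0.42)
}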
-//go:build !windows && !js -// +build !windows,!js +//go:build !windows && !js && !wasip1 +// +build !windows,!js,!wasip1 package prometheus diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go similarity index 63% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go index c318385c..d8d9a6d7 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go @@ -1,10 +1,9 @@ -// Copyright 2013 Matt T. Proud -// +// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,5 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil +//go:build wasip1 +// +build wasip1 + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (*processCollector) processCollect(chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index cee360db..2f154907 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -483,6 +483,8 @@ type Histogram struct { // histograms. PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. + // Only used for native histograms. These exemplars MUST have a timestamp. + Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"` } func (x *Histogram) Reset() { @@ -622,6 +624,13 @@ func (x *Histogram) GetPositiveCount() []float64 { return nil } +func (x *Histogram) GetExemplars() []*Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. 
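The regenerated bindings above add an Exemplars slice to the Histogram message (field 16) with a matching GetExemplars accessor. A short sketch of reading those exemplars from gathered families, assuming this client_model version is on the module path; the Bucket message and the regenerated descriptor bytes follow.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	// ... collectors that produce native histograms would be registered here ...

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		for _, m := range mf.GetMetric() {
			// GetExemplars is the accessor generated above; it returns nil for
			// classic histograms and for older client_model versions.
			for _, ex := range m.GetHistogram().GetExemplars() {
				fmt.Println(ex.GetValue(), ex.GetTimestamp())
			}
		}
	}
}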
type Bucket struct { @@ -923,6 +932,7 @@ type MetricFamily struct { Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"` } func (x *MetricFamily) Reset() { @@ -985,6 +995,13 @@ func (x *MetricFamily) GetMetric() []*Metric { return nil } +func (x *MetricFamily) GetUnit() string { + if x != nil && x.Unit != nil { + return *x.Unit + } + return "" +} + var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ @@ -1028,7 +1045,7 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, @@ -1071,79 +1088,84 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, - 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, - 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, - 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, - 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, - 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 
0x6e, 0x67, - 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, + 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, + 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, - 0x61, 0x62, 0x65, 
0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, - 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, - 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, + 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, - 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, - 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, - 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, - 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, - 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b, - 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, - 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, - 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42, - 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69, - 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, + 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 
0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, + 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a, + 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, + 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, + 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, + 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, + 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, + 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, } var ( @@ -1185,22 +1207,23 @@ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge - 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType - 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 19, // [19:19] is the sub-list for method output_type - 19, // [19:19] is the sub-list for method input_type - 19, 
// [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar + 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_io_prometheus_client_metrics_proto_init() } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 90639781..b2b89b01 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -14,6 +14,7 @@ package expfmt import ( + "bufio" "fmt" "io" "math" @@ -21,8 +22,8 @@ import ( "net/http" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/encoding/protodelim" - "github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/common/model" ) @@ -44,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return FmtUnknown + return fmtUnknown } const textType = "text/plain" @@ -52,28 +53,28 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown + return fmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown + return fmtUnknown } - return FmtProtoDelim + return fmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown + return fmtUnknown } - return FmtText + return fmtText } - return FmtUnknown + return fmtUnknown } // NewDecoder returns a new decoder based on the given input format. // If the input format does not imply otherwise, a text format decoder is returned. func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: + switch format.FormatType() { + case TypeProtoDelim: return &protoDecoder{r: r} } return &textDecoder{r: r} @@ -86,8 +87,10 @@ type protoDecoder struct { // Decode implements the Decoder interface. 
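On the consuming side, the decoder now builds on protodelim instead of the removed pbutil package, but the exported expfmt API is unchanged. A hedged sketch of draining one scrape response via ResponseFormat and NewDecoder (the endpoint URL is a placeholder); the protodelim-based Decode body follows.

package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// decodeAll drains one scrape response into MetricFamily messages.
func decodeAll(resp *http.Response) ([]*dto.MetricFamily, error) {
	format := expfmt.ResponseFormat(resp.Header) // text, delimited protobuf, or unknown
	dec := expfmt.NewDecoder(resp.Body, format)

	var out []*dto.MetricFamily
	for {
		mf := &dto.MetricFamily{}
		if err := dec.Decode(mf); err != nil {
			if errors.Is(err, io.EOF) {
				return out, nil
			}
			return nil, err
		}
		out = append(out, mf)
	}
}

func main() {
	resp, err := http.Get("http://localhost:9090/metrics") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	mfs, err := decodeAll(resp)
	if err != nil {
		panic(err)
	}
	fmt.Println("metric families:", len(mfs))
}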
func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { + opts := protodelim.UnmarshalOptions{ + MaxSize: -1, + } + if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 7f611ffa..8fd80618 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,10 +18,12 @@ import ( "io" "net/http" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "github.com/prometheus/common/model" + dto "github.com/prometheus/client_model/go" ) @@ -60,23 +62,32 @@ func (ec encoderCloser) Close() error { // as the support is still experimental. To include the option to negotiate // FmtOpenMetrics, use NegotiateOpenMetrics. func Negotiate(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. + } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return fmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return fmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return fmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return fmtText + escapingScheme } } - return FmtText + return fmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -84,29 +95,40 @@ func Negotiate(h http.Header) Format { // temporary and will disappear once FmtOpenMetrics is fully supported and as // such may be negotiated by the normal Negotiate function. func NegotiateIncludingOpenMetrics(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. 
+ } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return fmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return fmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return fmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return fmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { - if ver == OpenMetricsVersion_1_0_0 { - return FmtOpenMetrics_1_0_0 + switch ver { + case OpenMetricsVersion_1_0_0: + return fmtOpenMetrics_1_0_0 + escapingScheme + default: + return fmtOpenMetrics_0_0_1 + escapingScheme } - return FmtOpenMetrics_0_0_1 } } - return FmtText + return fmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All @@ -115,44 +137,48 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { // for FmtOpenMetrics, but a future (breaking) release will add the Close method // to the Encoder interface directly. The current version of the Encoder // interface is kept for backwards compatibility. +// In cases where the Format does not allow for UTF-8 names, the global +// NameEscapingScheme will be applied. func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: + escapingScheme := format.ToEscapingScheme() + + switch format.FormatType() { + case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) + _, err := protodelim.MarshalTo(w, v) return err }, close: func() error { return nil }, } - case FmtProtoCompact: + case TypeProtoCompact: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) + _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String()) return err }, close: func() error { return nil }, } - case FmtProtoText: + case TypeProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, prototext.Format(v)) + _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme))) return err }, close: func() error { return nil }, } - case FmtText: + case TypeTextPlain: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) + _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, } - case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: + case TypeOpenMetrics: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, v) + _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index c4cb20f0..6fc9555e 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -14,30 +14,154 @@ // Package expfmt contains tools for reading and writing Prometheus metrics. package expfmt +import ( + "strings" + + "github.com/prometheus/common/model" +) + // Format specifies the HTTP content type of the different wire protocols. 
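Taken together, the negotiation and encoder changes mean a hand-rolled exposition handler keeps calling Negotiate and NewEncoder as before while gaining escaping support, since the selected scheme travels inside the returned Format value. A minimal sketch (promhttp remains the usual choice; the listen address is illustrative); the Format type and its constants follow.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func main() {
	reg := prometheus.NewRegistry()

	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		// Negotiate now also honours an "escaping=..." parameter in the Accept
		// header; the selected scheme is carried inside the returned Format.
		format := expfmt.Negotiate(r.Header)
		w.Header().Set("Content-Type", string(format))

		mfs, err := reg.Gather()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		enc := expfmt.NewEncoder(w, format)
		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				return
			}
		}
	})

	_ = http.ListenAndServe(":8080", nil) // illustrative address
}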
type Format string -// Constants to assemble the Content-Type values for the different wire protocols. +// Constants to assemble the Content-Type values for the different wire +// protocols. The Content-Type strings here are all for the legacy exposition +// formats, where valid characters for metric names and label names are limited. +// Support for arbitrary UTF-8 characters in those names is already partially +// implemented in this module (see model.ValidationScheme), but to actually use +// it on the wire, new content-type strings will have to be agreed upon and +// added here. const ( TextVersion = "0.0.4" ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Note that these + // values are now unexported. If code was relying on comparisons to these + // constants, instead use FormatType(). + fmtUnknown Format = `` + fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + fmtProtoDelim Format = protoFmt + ` encoding=delimited` + fmtProtoText Format = protoFmt + ` encoding=text` + fmtProtoCompact Format = protoFmt + ` encoding=compact-text` + fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( hdrContentType = "Content-Type" hdrAccept = "Accept" ) + +// FormatType is a Go enum representing the overall category for the given +// Format. As the number of Format permutations increases, doing basic string +// comparisons are not feasible, so this enum captures the most useful +// high-level attribute of the Format string. +type FormatType int + +const ( + TypeUnknown = iota + TypeProtoCompact + TypeProtoDelim + TypeProtoText + TypeTextPlain + TypeOpenMetrics +) + +// NewFormat generates a new Format from the type provided. Mostly used for +// tests, most Formats should be generated as part of content negotiation in +// encode.go. +func NewFormat(t FormatType) Format { + switch t { + case TypeProtoCompact: + return fmtProtoCompact + case TypeProtoDelim: + return fmtProtoDelim + case TypeProtoText: + return fmtProtoText + case TypeTextPlain: + return fmtText + case TypeOpenMetrics: + return fmtOpenMetrics_1_0_0 + default: + return fmtUnknown + } +} + +// FormatType deduces an overall FormatType for the given format. 
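Because the Fmt* constants are now unexported, callers that compared Format values directly need the helpers instead, as the comment above notes. A short sketch of that migration; the FormatType implementation follows.

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Code that used to compare against the exported FmtProtoDelim constant can
	// construct formats with NewFormat and classify incoming ones with
	// FormatType instead.
	f := expfmt.NewFormat(expfmt.TypeProtoDelim)

	switch f.FormatType() {
	case expfmt.TypeProtoDelim:
		fmt.Println("delimited protobuf:", f)
	case expfmt.TypeTextPlain:
		fmt.Println("classic text format:", f)
	default:
		fmt.Println("something else:", f)
	}
}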
+func (f Format) FormatType() FormatType { + toks := strings.Split(string(f), ";") + if len(toks) < 2 { + return TypeUnknown + } + + params := make(map[string]string) + for i, t := range toks { + if i == 0 { + continue + } + args := strings.Split(t, "=") + if len(args) != 2 { + continue + } + params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1]) + } + + switch strings.TrimSpace(toks[0]) { + case ProtoType: + if params["proto"] != ProtoProtocol { + return TypeUnknown + } + switch params["encoding"] { + case "delimited": + return TypeProtoDelim + case "text": + return TypeProtoText + case "compact-text": + return TypeProtoCompact + default: + return TypeUnknown + } + case OpenMetricsType: + if params["charset"] != "utf-8" { + return TypeUnknown + } + return TypeOpenMetrics + case "text/plain": + v, ok := params["version"] + if !ok { + return TypeTextPlain + } + if v == TextVersion { + return TypeTextPlain + } + return TypeUnknown + default: + return TypeUnknown + } +} + +// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the +// Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid +// "escaping" term exists, that will be used. Otherwise, the global default will +// be returned. +func (format Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(format), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + continue + } + key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + if key == model.EscapingKey { + scheme, err := model.ToEscapingScheme(value) + if err != nil { + return model.NameEscapingScheme + } + return scheme + } + } + return model.NameEscapingScheme +} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 21cdddcf..5622578e 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -35,6 +35,18 @@ import ( // sanity checks. If the input contains duplicate metrics or invalid metric or // label names, the conversion will result in invalid text format output. // +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// // This function fulfills the type 'expfmt.encoder'. // // Note that OpenMetrics requires a final `# EOF` line. 
Since this function acts @@ -98,7 +110,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, shortName) written += n if err != nil { return @@ -124,7 +136,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, shortName) written += n if err != nil { return @@ -303,21 +315,9 @@ func writeOpenMetricsSample( floatValue float64, intValue uint64, useIntValue bool, exemplar *dto.Exemplar, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeOpenMetricsLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -365,27 +365,58 @@ func writeOpenMetricsSample( return written, nil } -// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float -// in OpenMetrics style. -func writeOpenMetricsLabelPairs( +// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but +// formats the float in OpenMetrics style. +func writeOpenMetricsNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces, quoted. + if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -451,7 +482,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0) written += n if err != nil { return written, err diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 2946b8f1..f9b8265a 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -62,6 +62,18 @@ var ( // contains duplicate metrics or invalid metric or label names, the conversion // will result in invalid text format output. 
// +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// // This method fulfills the type 'prometheus.encoder'. func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { // Fail-fast checks. @@ -98,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -124,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -280,21 +292,9 @@ func writeSample( additionalLabelName string, additionalLabelValue float64, value float64, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -330,32 +330,64 @@ func writeSample( return written, nil } -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( +// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given metric name and additional label pair into text formatted as +// required by the text format and writes it to 'w'. An empty slice in +// combination with an empty string 'additionalLabelName' results in nothing +// being written. Otherwise, the label pairs are written, escaped as required by +// the text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. If the metric name is not +// legacy-valid, it will be put inside the brackets as well. Legacy-invalid +// label names will also be quoted. 
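The quoting rules described above can be observed end to end through the existing MetricFamilyToText entry point. A sketch, assuming the global validation scheme has been switched to UTF-8 so a dotted name is accepted; the expected output shape is inferred from the doc comment above rather than from running this patch. The writeNameAndLabelPairs implementation follows.

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Opt in to UTF-8 names globally; the default is still LegacyValidation.
	model.NameValidationScheme = model.UTF8Validation

	mf := &dto.MetricFamily{
		Name: proto.String("my.metric"), // dotted name fails the legacy check
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("label.with.dots"),
				Value: proto.String("x"),
			}},
			Gauge: &dto.Gauge{Value: proto.Float64(1)},
		}},
	}

	// Per the rules above, the sample line comes out roughly as:
	//   {"my.metric","label.with.dots"="x"} 1
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}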
+func writeNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces. + if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -462,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) { numBufPool.Put(bp) return written, err } + +// writeName writes a string as-is if it complies with the legacy naming +// scheme, or escapes it in double quotes if not. +func writeName(w enhancedWriter, name string) (int, error) { + if model.IsValidLegacyMetricName(model.LabelValue(name)) { + return w.WriteString(name) + } + var written int + var err error + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + var n int + n, err = writeEscapedString(w, name, true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 35db1cc9..26490211 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -16,6 +16,7 @@ package expfmt import ( "bufio" "bytes" + "errors" "fmt" "io" "math" @@ -24,8 +25,9 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" + + "github.com/prometheus/common/model" ) // A stateFn is a function that represents a state in a state machine. By @@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF // stream. Turn this error into something nicer and more // meaningful. (io.EOF is often used as a signal for the legitimate end // of an input stream.) - if p.err == io.EOF { + if p.err != nil && errors.Is(p.err, io.EOF) { p.parseError("unexpected end of input stream") } return p.metricFamiliesByName, p.err @@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn { // which is not an error but the signal that we are done. // Any other error that happens to align with the start of // a line is still an error. 
- if p.err == io.EOF { + if errors.Is(p.err, io.EOF) { p.err = nil } return nil diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 35e739c7..178fdbaf 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -90,13 +90,13 @@ func (a *Alert) Validate() error { return fmt.Errorf("start time must be before end time") } if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) + return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { return fmt.Errorf("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) + return fmt.Errorf("invalid annotations: %w", err) } return nil } diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index ef895633..3317ce22 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,17 +97,25 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. +// IsValid returns true iff name matches the pattern of LabelNameRE for legacy +// names, and iff it's valid UTF-8 if NameValidationScheme is set to +// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the +// check but a much faster hardcoded implementation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false + switch NameValidationScheme { + case LegacyValidation: + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } } + case UTF8Validation: + return utf8.ValidString(string(ln)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } return true } @@ -164,7 +172,7 @@ func (l LabelNames) String() string { // A LabelValue is an associated value for a LabelName. type LabelValue string -// IsValid returns true iff the string is a valid UTF8. +// IsValid returns true iff the string is a valid UTF-8. func (lv LabelValue) IsValid() bool { return utf8.ValidString(string(lv)) } diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go new file mode 100644 index 00000000..447ab8ad --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metadata.go @@ -0,0 +1,28 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
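The relaxed LabelName.IsValid above is gated entirely by the package-level NameValidationScheme, which still defaults to LegacyValidation. A small sketch of the behavioural difference; the new metadata.go file continues below.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := model.LabelName("label.with.dots")

	// Default scheme: legacy character set, so dots are rejected.
	fmt.Println(name.IsValid()) // false

	// After opting in to UTF-8 validation the same name is accepted.
	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(name.IsValid()) // true
}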
+ +package model + +// MetricType represents metric type values. +type MetricType string + +const ( + MetricTypeCounter = MetricType("counter") + MetricTypeGauge = MetricType("gauge") + MetricTypeHistogram = MetricType("histogram") + MetricTypeGaugeHistogram = MetricType("gaugehistogram") + MetricTypeSummary = MetricType("summary") + MetricTypeInfo = MetricType("info") + MetricTypeStateset = MetricType("stateset") + MetricTypeUnknown = MetricType("unknown") +) diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 00804b7f..0bd29b3a 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -18,15 +18,84 @@ import ( "regexp" "sort" "strings" + "unicode/utf8" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // NameValidationScheme determines the method of name validation to be used by + // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode + // in isolation from other components that don't support UTF-8 may result in + // bugs or other undefined behavior. This value is intended to be set by + // UTF-8-aware binaries as part of their startup. To avoid need for locking, + // this value should be set once, ideally in an init(), before multiple + // goroutines are started. + NameValidationScheme = LegacyValidation + + // NameEscapingScheme defines the default way that names will be + // escaped when presented to systems that do not support UTF-8 names. If the + // Content-Type "escaping" term is specified, that will override this value. + NameEscapingScheme = ValueEncodingEscaping +) + +// ValidationScheme is a Go enum for determining how metric and label names will +// be validated by this library. +type ValidationScheme int + +const ( + // LegacyValidation is a setting that requirets that metric and label names + // conform to the original Prometheus character requirements described by + // MetricNameRE and LabelNameRE. + LegacyValidation ValidationScheme = iota + + // UTF8Validation only requires that metric and label names be valid UTF-8 + // strings. + UTF8Validation +) + +type EscapingScheme int + +const ( + // NoEscaping indicates that a name will not be escaped. Unescaped names that + // do not conform to the legacy validity check will use a new exposition + // format syntax that will be officially standardized in future versions. + NoEscaping EscapingScheme = iota + + // UnderscoreEscaping replaces all legacy-invalid characters with underscores. + UnderscoreEscaping + + // DotsEscaping is similar to UnderscoreEscaping, except that dots are + // converted to `_dot_` and pre-existing underscores are converted to `__`. + DotsEscaping + + // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid + // characters with the unicode value, surrounded by underscores. Single + // underscores are replaced with double underscores. + ValueEncodingEscaping +) + +const ( + // EscapingKey is the key in an Accept or Content-Type header that defines how + // metric and label names that do not conform to the legacy character + // requirements should be escaped when being scraped by a legacy prometheus + // system. 
If a system does not explicitly pass an escaping parameter in the + // Accept header, the default NameEscapingScheme will be used. + EscapingKey = "escaping" + + // Possible values for Escaping Key: + AllowUTF8 = "allow-utf-8" // No escaping required. + EscapeUnderscores = "underscores" + EscapeDots = "dots" + EscapeValues = "values" ) +// MetricNameRE is a regular expression matching valid metric +// names. Note that the IsValidMetricName function performs the same +// check but faster than a match with this regular expression. +var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // A Metric is similar to a LabelSet, but the key difference is that a Metric is // a singleton and refers to one and only one stream of samples. type Metric LabelSet @@ -86,17 +155,302 @@ func (m Metric) FastFingerprint() Fingerprint { return LabelSet(m).FastFingerprint() } -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE +// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is +// selected. +func IsValidMetricName(n LabelValue) bool { + switch NameValidationScheme { + case LegacyValidation: + return IsValidLegacyMetricName(n) + case UTF8Validation: + if len(n) == 0 { + return false + } + return utf8.ValidString(string(n)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) + } +} + +// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the +// legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n LabelValue) bool { if len(n) == 0 { return false } for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + if !isValidLegacyRune(b, i) { return false } } return true } + +// EscapeMetricFamily escapes the given metric names and labels with the given +// escaping scheme. Returns a new object that uses the same pointers to fields +// when possible and creates new escaped versions so as not to mutate the +// input. +func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily { + if v == nil { + return nil + } + + if scheme == NoEscaping { + return v + } + + out := &dto.MetricFamily{ + Help: v.Help, + Type: v.Type, + } + + // If the name is nil, copy as-is, don't try to escape. 
+ if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + out.Name = v.Name + } else { + out.Name = proto.String(EscapeName(v.GetName(), scheme)) + } + for _, m := range v.Metric { + if !metricNeedsEscaping(m) { + out.Metric = append(out.Metric, m) + continue + } + + escaped := &dto.Metric{ + Gauge: m.Gauge, + Counter: m.Counter, + Summary: m.Summary, + Untyped: m.Untyped, + Histogram: m.Histogram, + TimestampMs: m.TimestampMs, + } + + for _, l := range m.Label { + if l.GetName() == MetricNameLabel { + if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(MetricNameLabel), + Value: proto.String(EscapeName(l.GetValue(), scheme)), + }) + continue + } + if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(EscapeName(l.GetName(), scheme)), + Value: l.Value, + }) + } + out.Metric = append(out.Metric, escaped) + } + return out +} + +func metricNeedsEscaping(m *dto.Metric) bool { + for _, l := range m.Label { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + return true + } + if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + return true + } + } + return false +} + +const ( + lowerhex = "0123456789abcdef" +) + +// EscapeName escapes the incoming name according to the provided escaping +// scheme. Depending on the rules of escaping, this may cause no change in the +// string that is returned. (Especially NoEscaping, which by definition is a +// noop). This function does not do any validation of the name. +func EscapeName(name string, scheme EscapingScheme) string { + if len(name) == 0 { + return name + } + var escaped strings.Builder + switch scheme { + case NoEscaping: + return name + case UnderscoreEscaping: + if IsValidLegacyMetricName(LabelValue(name)) { + return name + } + for i, b := range name { + if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else { + escaped.WriteRune('_') + } + } + return escaped.String() + case DotsEscaping: + // Do not early return for legacy valid names, we still escape underscores. + for i, b := range name { + if b == '_' { + escaped.WriteString("__") + } else if b == '.' { + escaped.WriteString("_dot_") + } else if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else { + escaped.WriteRune('_') + } + } + return escaped.String() + case ValueEncodingEscaping: + if IsValidLegacyMetricName(LabelValue(name)) { + return name + } + escaped.WriteString("U__") + for i, b := range name { + if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else if !utf8.ValidRune(b) { + escaped.WriteString("_FFFD_") + } else if b < 0x100 { + escaped.WriteRune('_') + for s := 4; s >= 0; s -= 4 { + escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) + } + escaped.WriteRune('_') + } else if b < 0x10000 { + escaped.WriteRune('_') + for s := 12; s >= 0; s -= 4 { + escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) + } + escaped.WriteRune('_') + } + } + return escaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +// lower function taken from strconv.atoi +func lower(c byte) byte { + return c | ('x' - 'X') +} + +// UnescapeName unescapes the incoming name according to the provided escaping +// scheme if possible. Some schemes are partially or totally non-roundtripable. 
+// If any error is enountered, returns the original input. +func UnescapeName(name string, scheme EscapingScheme) string { + if len(name) == 0 { + return name + } + switch scheme { + case NoEscaping: + return name + case UnderscoreEscaping: + // It is not possible to unescape from underscore replacement. + return name + case DotsEscaping: + name = strings.ReplaceAll(name, "_dot_", ".") + name = strings.ReplaceAll(name, "__", "_") + return name + case ValueEncodingEscaping: + escapedName, found := strings.CutPrefix(name, "U__") + if !found { + return name + } + + var unescaped strings.Builder + TOP: + for i := 0; i < len(escapedName); i++ { + // All non-underscores are treated normally. + if escapedName[i] != '_' { + unescaped.WriteByte(escapedName[i]) + continue + } + i++ + if i >= len(escapedName) { + return name + } + // A double underscore is a single underscore. + if escapedName[i] == '_' { + unescaped.WriteByte('_') + continue + } + // We think we are in a UTF-8 code, process it. + var utf8Val uint + for j := 0; i < len(escapedName); j++ { + // This is too many characters for a utf8 value. + if j > 4 { + return name + } + // Found a closing underscore, convert to a rune, check validity, and append. + if escapedName[i] == '_' { + utf8Rune := rune(utf8Val) + if !utf8.ValidRune(utf8Rune) { + return name + } + unescaped.WriteRune(utf8Rune) + continue TOP + } + r := lower(escapedName[i]) + utf8Val *= 16 + if r >= '0' && r <= '9' { + utf8Val += uint(r) - '0' + } else if r >= 'a' && r <= 'f' { + utf8Val += uint(r) - 'a' + 10 + } else { + return name + } + i++ + } + // Didn't find closing underscore, invalid. + return name + } + return unescaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +func isValidLegacyRune(b rune, i int) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0) +} + +func (e EscapingScheme) String() string { + switch e { + case NoEscaping: + return AllowUTF8 + case UnderscoreEscaping: + return EscapeUnderscores + case DotsEscaping: + return EscapeDots + case ValueEncodingEscaping: + return EscapeValues + default: + panic(fmt.Sprintf("unknown format scheme %d", e)) + } +} + +func ToEscapingScheme(s string) (EscapingScheme, error) { + if s == "" { + return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + } + switch s { + case AllowUTF8: + return NoEscaping, nil + case EscapeUnderscores: + return UnderscoreEscaping, nil + case EscapeDots: + return DotsEscaping, nil + case EscapeValues: + return ValueEncodingEscaping, nil + default: + return NoEscaping, fmt.Errorf("unknown format scheme " + s) + } +} diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go index 8762b13c..dc8a0026 100644 --- a/vendor/github.com/prometheus/common/model/signature.go +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -22,10 +22,8 @@ import ( // when calculating their combined hash value (aka signature aka fingerprint). const SeparatorByte byte = 255 -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) +// cache the signature of an empty label set. +var emptyLabelSignature = hashNew() // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a // given label set. 
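A small sketch of how a UTF-8-aware binary might use the API introduced above. Every identifier here is exported by this package per the diff (NameValidationScheme, UTF8Validation, IsValidMetricName, IsValidLegacyMetricName, EscapeName, UnescapeName, ToEscapingScheme and the escaping constants); the escaped output in the comment follows the ValueEncodingEscaping rules and is indicative only.

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func init() {
    // Opt in to UTF-8 names once, before any goroutines validate names.
    model.NameValidationScheme = model.UTF8Validation
}

func main() {
    name := "http.requests.total"

    fmt.Println(model.IsValidMetricName(model.LabelValue(name)))       // true under UTF8Validation
    fmt.Println(model.IsValidLegacyMetricName(model.LabelValue(name))) // false: dots are not legacy-valid

    // Escape for a consumer that only understands legacy names, then undo it.
    escaped := model.EscapeName(name, model.ValueEncodingEscaping)
    fmt.Println(escaped) // e.g. U__http_2e_requests_2e_total
    fmt.Println(model.UnescapeName(escaped, model.ValueEncodingEscaping)) // http.requests.total

    // The Content-Type "escaping" term maps onto the same schemes.
    scheme, err := model.ToEscapingScheme(model.EscapeDots)
    fmt.Println(scheme, err) // dots <nil>
}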
(Collisions are possible but unlikely if the number of label diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index bb99889d..910b0b71 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -81,7 +81,7 @@ func (s *Silence) Validate() error { } for _, m := range s.Matchers { if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) + return fmt.Errorf("invalid matcher: %w", err) } } if s.StartsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 9eb44041..8050637d 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -21,14 +21,12 @@ import ( "strings" ) -var ( - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) +// ZeroSample is the pseudo zero-value of Sample used to signal a +// non-existing sample. It is a Sample with timestamp Earliest, value 0.0, +// and metric nil. Note that the natural zero value of Sample has a timestamp +// of 0, which is possible to appear in a real Sample and thus not suitable +// to signal a non-existing Sample. +var ZeroSample = Sample{Timestamp: Earliest} // Sample is a sample pair associated with a metric. A single sample must either // define Value or Histogram but not both. Histogram == nil implies the Value @@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error { value, err := strconv.ParseFloat(f, 64) if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) + return fmt.Errorf("error parsing sample value: %w", err) } s.Value = SampleValue(value) return nil diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index 0f615a70..ae35cc2a 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -20,14 +20,12 @@ import ( "strconv" ) -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} -) +// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a +// non-existing sample pair. It is a SamplePair with timestamp Earliest and +// value 0.0. Note that the natural zero value of SamplePair has a timestamp +// of 0, which is possible to appear in a real SamplePair and thus not +// suitable to signal a non-existing SamplePair. +var ZeroSamplePair = SamplePair{Timestamp: Earliest} // A SampleValue is a representation of a value for a given sample at a given // time. 
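ZeroSample and ZeroSamplePair are sentinels: they carry the Earliest timestamp precisely so that a missing sample can be told apart from a real sample at timestamp 0. A brief usage sketch; the lookup helper is hypothetical and only stands in for any function that may fail to find a sample pair.

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

// lookup is a hypothetical stand-in for a function that may not find a pair.
func lookup(found bool) model.SamplePair {
    if !found {
        return model.ZeroSamplePair
    }
    return model.SamplePair{Timestamp: 0, Value: 1} // timestamp 0 is a legitimate sample
}

func main() {
    fmt.Println(lookup(false) == model.ZeroSamplePair) // true: signals "no sample"
    fmt.Println(lookup(true) == model.ZeroSamplePair)  // false: a real sample at t=0
}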
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 0ce7ea46..062a2818 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.53.3 +GOLANGCI_LINT_VERSION ?= v1.54.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 13d74e39..134767d6 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build netbsd || openbsd || solaris || windows || nostatfs -// +build netbsd openbsd solaris windows nostatfs +//go:build !freebsd && !linux +// +build !freebsd,!linux package procfs diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index bee15144..80df79c3 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs -// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs +//go:build freebsd || linux +// +build freebsd linux package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 852c8c4a..9d8af6db 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -44,6 +44,14 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 + + // kernel version >= 4.14 MaxLen + // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 + fieldTransport11RDMAMaxLen = 28 + + // kernel version <= 4.2 MinLen + // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 + fieldTransport11RDMAMinLen = 20 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -233,6 +241,33 @@ type NFSTransportStats struct { // A running counter, incremented on each request as the current size of the // pending queue. CumulativePendingQueue uint64 + + // Stats below only available with stat version 1.1. 
+ // Transport over RDMA + + // accessed when sending a call + ReadChunkCount uint64 + WriteChunkCount uint64 + ReplyChunkCount uint64 + TotalRdmaRequest uint64 + + // rarely accessed error counters + PullupCopyCount uint64 + HardwayRegisterCount uint64 + FailedMarshalCount uint64 + BadReplyCount uint64 + MrsRecovered uint64 + MrsOrphaned uint64 + MrsAllocated uint64 + EmptySendctxQ uint64 + + // accessed when receiving a reply + TotalRdmaReply uint64 + FixupCopyCount uint64 + ReplyWaitsForSend uint64 + LocalInvNeeded uint64 + NomsgCallCount uint64 + BcallCount uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice @@ -587,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats expectedLength = fieldTransport11TCPLen } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen + } else if protocol == "rdma" { + expectedLength = fieldTransport11RDMAMinLen } else { return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } - if len(ss) != expectedLength { - return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss) + if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || + (protocol == "rdma" && len(ss) < expectedLength) { + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay @@ -604,7 +642,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) + // + // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen + ns := make([]uint64, fieldTransport11RDMAMaxLen+3) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -622,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // we set them to 0 here. if protocol == "udp" { ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } else if protocol == "tcp" { + ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) + } else if protocol == "rdma" { + ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) 
} return &NFSTransportStats{ + // NFS xprt over tcp or udp Protocol: protocol, Port: ns[0], Bind: ns[1], @@ -636,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats BadTransactionIDs: ns[7], CumulativeActiveRequests: ns[8], CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], + + // NFS xprt over tcp or udp + // And statVersion 1.1 + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + + // NFS xprt over rdma + // And stat Version 1.1 + ReadChunkCount: ns[13], + WriteChunkCount: ns[14], + ReplyChunkCount: ns[15], + TotalRdmaRequest: ns[16], + PullupCopyCount: ns[17], + HardwayRegisterCount: ns[18], + FailedMarshalCount: ns[19], + BadReplyCount: ns[20], + MrsRecovered: ns[21], + MrsOrphaned: ns[22], + MrsAllocated: ns[23], + EmptySendctxQ: ns[24], + TotalRdmaReply: ns[25], + FixupCopyCount: ns[26], + ReplyWaitsForSend: ns[27], + LocalInvNeeded: ns[28], + NomsgCallCount: ns[29], + BcallCount: ns[30], }, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 4b7933e4..fa761b35 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -26,6 +26,7 @@ var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) rInotify = regexp.MustCompile(`^inotify`) rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) ) @@ -40,6 +41,8 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string + // Inode number + Ino string // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { return nil, err } - var text, pos, flags, mntid string + var text, pos, flags, mntid, ino string var inotify []InotifyInfo scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { flags = rFlags.FindStringSubmatch(text)[1] } else if rMntID.MatchString(text) { mntid = rMntID.FindStringSubmatch(text)[1] + } else if rIno.MatchString(text) { + ino = rIno.FindStringSubmatch(text)[1] } else if rInotify.MatchString(text) { newInotify, err := parseInotifyInfo(text) if err != nil { @@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { Pos: pos, Flags: flags, MntID: mntid, + Ino: ino, InotifyInfos: inotify, } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 727549a1..7e75c286 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -63,17 +63,17 @@ type ProcMap struct { // parseDevice parses the device token of a line and converts it to a dev_t // (mkdev) like structure. 
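A hedged sketch of reading the newly exposed data through procfs: the Ino field parsed from fdinfo and the RDMA transport counters on NFS mounts. It assumes the package's existing Self, FDInfo and MountStats helpers and the MountStatsNFS type; error handling is abbreviated, and the set of mounts and file descriptors is machine-dependent.

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    p, err := procfs.Self()
    if err != nil {
        panic(err)
    }

    // Inode number now parsed from /proc/self/fdinfo/<fd>.
    if info, err := p.FDInfo("0"); err == nil {
        fmt.Println("fd 0 inode:", info.Ino)
    }

    // RDMA counters are populated for xprt lines whose protocol is "rdma".
    mounts, err := p.MountStats()
    if err != nil {
        panic(err)
    }
    for _, m := range mounts {
        if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
            fmt.Println(m.Mount,
                "read chunks:", nfs.Transport.ReadChunkCount,
                "rdma requests:", nfs.Transport.TotalRdmaRequest)
        }
    }
}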
func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks)) + i := strings.Index(s, ":") + if i == -1 { + return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) } - major, err := strconv.ParseUint(toks[0], 16, 0) + major, err := strconv.ParseUint(s[0:i], 16, 0) if err != nil { return 0, err } - minor, err := strconv.ParseUint(toks[1], 16, 0) + minor, err := strconv.ParseUint(s[i+1:], 16, 0) if err != nil { return 0, err } @@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) { // parseAddresses parses the start-end address. func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse) + idx := strings.Index(s, "-") + if idx == -1 { + return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) } - saddr, err := parseAddress(toks[0]) + saddr, err := parseAddress(s[0:idx]) if err != nil { return 0, 0, err } - eaddr, err := parseAddress(toks[1]) + eaddr, err := parseAddress(s[idx+1:]) if err != nil { return 0, 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index c055d075..46307f57 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -23,7 +23,7 @@ import ( ) // ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. +// read from /proc/[pid]/status. type ProcStatus struct { // The process ID. PID int @@ -32,6 +32,8 @@ type ProcStatus struct { // Thread group ID. TGID int + // List of Pid namespace. + NSpids []uint64 // Peak virtual memory size. 
VmPeak uint64 // nolint:revive @@ -127,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt copy(s.UIDs[:], strings.Split(vString, "\t")) case "Gid": copy(s.GIDs[:], strings.Split(vString, "\t")) + case "NSpid": + s.NSpids = calcNSPidsList(vString) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -200,3 +204,18 @@ func calcCpusAllowedList(cpuString string) []uint64 { sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) return g } + +func calcNSPidsList(nspidsString string) []uint64 { + s := strings.Split(nspidsString, " ") + var nspids []uint64 + + for _, nspid := range s { + nspid, _ := strconv.ParseUint(nspid, 10, 64) + if nspid == 0 { + continue + } + nspids = append(nspids, nspid) + } + + return nspids +} diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/build_conversion.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/build_conversion.go deleted file mode 100644 index c6638dd7..00000000 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/build_conversion.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The Shipwright Contributors -// -// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/shipwright-io/build/pkg/webhook" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -// ensure v1alpha1 implements the Conversion interface -var _ webhook.Conversion = (*Build)(nil) - -func (src *Build) ConvertTo(_ context.Context, _ *unstructured.Unstructured) error { - return fmt.Errorf("v1alpha1 is the current storage version, nothing to convert to") -} - -func (src *Build) ConvertFrom(_ context.Context, _ *unstructured.Unstructured) error { - return fmt.Errorf("v1alpha1 is the current storage version, nothing to convert from") -} diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/build_types.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/build_types.go index e9f37890..36da3dc6 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/build_types.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/build_types.go @@ -108,6 +108,17 @@ const ( AnnotationBuildVerifyRepository = BuildDomain + "/verify.repository" ) +const ( + // OutputImageZeroTimestamp indicates that the UNIX timestamp 0 is to be used + OutputImageZeroTimestamp = "Zero" + + // OutputImageSourceTimestamp indicates that the timestamp of the respective source it to be used + OutputImageSourceTimestamp = "SourceTimestamp" + + // OutputImageBuildTimestamp indicates that the current timestamp of the build run itself is to be used + OutputImageBuildTimestamp = "BuildTimestamp" +) + // BuildSpec defines the desired state of Build type BuildSpec struct { // Source refers to the Git repository containing the @@ -233,6 +244,16 @@ type Image struct { // // +optional Labels map[string]string `json:"labels,omitempty"` + + // Timestamp references the optional image timestamp to be set, valid values are: + // - "Zero", to set 00:00:00 UTC on 1 January 1970 + // - "SourceTimestamp", to set the source timestamp dereived from the input source + // - "BuildTimestamp", to set the timestamp of the current build itself + // - Parsable integer number defined as the epoch seconds + // - or nil/empty to not set any specific timestamp + // + // +optional + Timestamp *string `json:"timestamp,omitempty"` } // BuildStatus defines the observed state of Build @@ -257,7 +278,6 @@ type BuildStatus struct { // Build is the 
Schema representing a Build definition // +kubebuilder:subresource:status -// +kubebuilder:storageversion // +kubebuilder:resource:path=builds,scope=Namespaced // +kubebuilder:printcolumn:name="Registered",type="string",JSONPath=".status.registered",description="The register status of the Build" // +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.reason",description="The reason of the registered Build, either an error or succeed message" diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildrun_conversion.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildrun_conversion.go deleted file mode 100644 index 5e07c04b..00000000 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildrun_conversion.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The Shipwright Contributors -// -// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/shipwright-io/build/pkg/webhook" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -// ensure v1alpha1 implements the Conversion interface -var _ webhook.Conversion = (*BuildRun)(nil) - -func (src *BuildRun) ConvertTo(_ context.Context, _ *unstructured.Unstructured) error { - return fmt.Errorf("v1alpha1 is the current storage version, nothing to convert to") -} - -func (src *BuildRun) ConvertFrom(_ context.Context, _ *unstructured.Unstructured) error { - return fmt.Errorf("v1alpha1 is the current storage version, nothing to convert from") -} diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildrun_types.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildrun_types.go index f8458f82..57155266 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildrun_types.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildrun_types.go @@ -117,6 +117,14 @@ type SourceResult struct { // // +optional Bundle *BundleSourceResult `json:"bundle,omitempty"` + + // Timestamp holds the timestamp of the source, which + // depends on the actual source type and could range from + // being the commit timestamp or the fileystem timestamp + // of the most recent source file in the working directory + // + // +optional + Timestamp *metav1.Time `json:"timestamp,omitempty"` } // BundleSourceResult holds the results emitted from the bundle source @@ -230,7 +238,6 @@ type ServiceAccount struct { // BuildRun is the Schema representing an instance of build execution // +kubebuilder:subresource:status -// +kubebuilder:storageversion // +kubebuilder:resource:path=buildruns,scope=Namespaced,shortName=br;brs // +kubebuilder:printcolumn:name="Succeeded",type="string",JSONPath=".status.conditions[?(@.type==\"Succeeded\")].status",description="The Succeeded status of the BuildRun" // +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.conditions[?(@.type==\"Succeeded\")].reason",description="The Succeeded reason of the BuildRun" diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildstrategy_conversion.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildstrategy_conversion.go deleted file mode 100644 index fd0de79c..00000000 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildstrategy_conversion.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The Shipwright Contributors -// -// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - "context" - "fmt" - - 
"github.com/shipwright-io/build/pkg/webhook" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -// ensure v1alpha1 implements the Conversion interface -var _ webhook.Conversion = (*BuildStrategy)(nil) - -func (src *BuildStrategy) ConvertTo(_ context.Context, _ *unstructured.Unstructured) error { - return fmt.Errorf("v1alpha1 is the current storage version, nothing to convert to") -} - -func (src *BuildStrategy) ConvertFrom(_ context.Context, _ *unstructured.Unstructured) error { - return fmt.Errorf("v1alpha1 is the current storage version, nothing to convert from") -} diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildstrategy_types.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildstrategy_types.go index 395e459f..3bb73739 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildstrategy_types.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/buildstrategy_types.go @@ -26,7 +26,6 @@ const ( // BuildStrategy is the Schema representing a strategy in the namespace scope to build images from source code. // +kubebuilder:subresource:status -// +kubebuilder:storageversion // +kubebuilder:resource:path=buildstrategies,scope=Namespaced,shortName=bs;bss type BuildStrategy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/clusterbuildstrategy_conversion.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/clusterbuildstrategy_conversion.go deleted file mode 100644 index 21b18562..00000000 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/clusterbuildstrategy_conversion.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The Shipwright Contributors -// -// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/shipwright-io/build/pkg/webhook" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -// ensure v1alpha1 implements the Conversion interface -var _ webhook.Conversion = (*ClusterBuildStrategy)(nil) - -func (src *ClusterBuildStrategy) ConvertTo(_ context.Context, _ *unstructured.Unstructured) error { - return fmt.Errorf("v1alpha1 is the current storage version, nothing to convert to") -} - -func (src *ClusterBuildStrategy) ConvertFrom(_ context.Context, _ *unstructured.Unstructured) error { - return fmt.Errorf("v1alpha1 is the current storage version, nothing to convert from") -} diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/clusterbuildstrategy_types.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/clusterbuildstrategy_types.go index b247167b..ae41c2a8 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/clusterbuildstrategy_types.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/clusterbuildstrategy_types.go @@ -27,7 +27,6 @@ const ( // ClusterBuildStrategy is the Schema representing a strategy in the cluster scope to build images from source code. 
// +kubebuilder:subresource:status -// +kubebuilder:storageversion // +kubebuilder:resource:path=clusterbuildstrategies,scope=Cluster,shortName=cbs;cbss type ClusterBuildStrategy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go index a546cc9e..58ce75da 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go @@ -904,6 +904,11 @@ func (in *Image) DeepCopyInto(out *Image) { (*out)[key] = val } } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(string) + **out = **in + } return } @@ -1123,6 +1128,10 @@ func (in *SourceResult) DeepCopyInto(out *SourceResult) { *out = new(BundleSourceResult) **out = **in } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = (*in).DeepCopy() + } return } diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/build_conversion.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/build_conversion.go index c246cd52..2562a541 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/build_conversion.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/build_conversion.go @@ -27,7 +27,7 @@ var _ webhook.Conversion = (*Build)(nil) // ConvertTo converts this Build object to v1alpha1 format. func (src *Build) ConvertTo(ctx context.Context, obj *unstructured.Unstructured) error { - ctxlog.Debug(ctx, "Converting Build from beta to alpha", "namespace", src.Namespace, "name", src.Name) + ctxlog.Info(ctx, "converting Build from beta to alpha", "namespace", src.Namespace, "name", src.Name) var alphaBuild v1alpha1.Build @@ -38,6 +38,12 @@ func (src *Build) ConvertTo(ctx context.Context, obj *unstructured.Unstructured) src.Spec.ConvertTo(&alphaBuild.Spec) + alphaBuild.Status = v1alpha1.BuildStatus{ + Registered: src.Status.Registered, + Reason: (*v1alpha1.BuildReason)(src.Status.Reason), + Message: src.Status.Message, + } + // convert annotation-controlled features if src.Spec.Retention != nil && src.Spec.Retention.AtBuildDeletion != nil { // We must create a new Map as otherwise the addition is not kept @@ -48,14 +54,6 @@ func (src *Build) ConvertTo(ctx context.Context, obj *unstructured.Unstructured) alphaBuild.ObjectMeta.Annotations[v1alpha1.AnnotationBuildRunDeletion] = strconv.FormatBool(*src.Spec.Retention.AtBuildDeletion) } - // convert OCIArtifact to Bundle - if src.Spec.Source.OCIArtifact != nil { - alphaBuild.Spec.Source.BundleContainer = &v1alpha1.BundleContainer{ - Image: src.Spec.Source.OCIArtifact.Image, - Prune: (*v1alpha1.PruneOption)(src.Spec.Source.OCIArtifact.Prune), - } - } - mapito, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&alphaBuild) if err != nil { ctxlog.Error(ctx, err, "failed structuring the newObject") @@ -77,7 +75,7 @@ func (src *Build) ConvertFrom(ctx context.Context, obj *unstructured.Unstructure ctxlog.Error(ctx, err, "failed unstructuring the convertedObject") } - ctxlog.Debug(ctx, "Converting Build from alpha to beta", "namespace", alphaBuild.Namespace, "name", alphaBuild.Name) + ctxlog.Info(ctx, "converting Build from alpha to beta", "namespace", alphaBuild.Namespace, "name", alphaBuild.Name) src.ObjectMeta = alphaBuild.ObjectMeta src.TypeMeta = alphaBuild.TypeMeta @@ -105,40 +103,44 @@ func 
(src *Build) ConvertFrom(ctx context.Context, obj *unstructured.Unstructure func (dest *BuildSpec) ConvertFrom(orig *v1alpha1.BuildSpec) error { // Handle BuildSpec Source - specSource := Source{} // only interested on spec.sources as long as an item of the list // is of the type LocalCopy. Otherwise, we move into bundle or git types. index, isLocal := v1alpha1.IsLocalCopyType(orig.Sources) if isLocal { - specSource.Type = LocalType - specSource.LocalSource = &Local{ - Name: orig.Sources[index].Name, - Timeout: orig.Sources[index].Timeout, + dest.Source = &Source{ + Type: LocalType, + Local: &Local{ + Name: orig.Sources[index].Name, + Timeout: orig.Sources[index].Timeout, + }, + ContextDir: orig.Source.ContextDir, } - } else { - if orig.Source.BundleContainer != nil { - specSource.Type = OCIArtifactType - specSource.OCIArtifact = &OCIArtifact{ + } else if orig.Source.BundleContainer != nil { + dest.Source = &Source{ + Type: OCIArtifactType, + OCIArtifact: &OCIArtifact{ Image: orig.Source.BundleContainer.Image, Prune: (*PruneOption)(orig.Source.BundleContainer.Prune), - } - if orig.Source.Credentials != nil { - specSource.OCIArtifact.PullSecret = &orig.Source.Credentials.Name - } - } else { - specSource.Type = GitType - specSource.GitSource = &Git{ - URL: orig.Source.URL, + }, + ContextDir: orig.Source.ContextDir, + } + if orig.Source.Credentials != nil { + dest.Source.OCIArtifact.PullSecret = &orig.Source.Credentials.Name + } + } else if orig.Source.URL != nil { + dest.Source = &Source{ + Type: GitType, + Git: &Git{ + URL: *orig.Source.URL, Revision: orig.Source.Revision, - } - if orig.Source.Credentials != nil { - specSource.GitSource.CloneSecret = &orig.Source.Credentials.Name - } + }, + ContextDir: orig.Source.ContextDir, + } + if orig.Source.Credentials != nil { + dest.Source.Git.CloneSecret = &orig.Source.Credentials.Name } } - specSource.ContextDir = orig.Source.ContextDir - dest.Source = specSource // Handle BuildSpec Triggers if orig.Trigger != nil { @@ -164,7 +166,7 @@ func (dest *BuildSpec) ConvertFrom(orig *v1alpha1.BuildSpec) error { } //handle spec.Dockerfile migration - if orig.Dockerfile != nil { + if orig.Dockerfile != nil && *orig.Dockerfile != "" { dockerfileParam := ParamValue{ Name: "dockerfile", SingleValue: &SingleValue{ @@ -194,6 +196,7 @@ func (dest *BuildSpec) ConvertFrom(orig *v1alpha1.BuildSpec) error { dest.Output.Annotations = orig.Output.Annotations dest.Output.Labels = orig.Output.Labels + dest.Output.Timestamp = orig.Output.Timestamp // Handle BuildSpec Timeout dest.Timeout = orig.Timeout @@ -226,11 +229,11 @@ func (dest *BuildSpec) ConvertFrom(orig *v1alpha1.BuildSpec) error { func (dest *BuildSpec) ConvertTo(bs *v1alpha1.BuildSpec) error { // Handle BuildSpec Sources or Source - if dest.Source.Type == LocalType && dest.Source.LocalSource != nil { + if dest.Source != nil && dest.Source.Type == LocalType && dest.Source.Local != nil { bs.Sources = append(bs.Sources, v1alpha1.BuildSource{ - Name: dest.Source.LocalSource.Name, + Name: dest.Source.Local.Name, Type: v1alpha1.LocalCopy, - Timeout: dest.Source.LocalSource.Timeout, + Timeout: dest.Source.Local.Timeout, }) } else { bs.Source = getAlphaBuildSource(*dest) @@ -286,6 +289,7 @@ func (dest *BuildSpec) ConvertTo(bs *v1alpha1.BuildSpec) error { } bs.Output.Annotations = dest.Output.Annotations bs.Output.Labels = dest.Output.Labels + bs.Output.Timestamp = dest.Output.Timestamp // Handle BuildSpec Timeout bs.Timeout = dest.Timeout @@ -413,6 +417,11 @@ func convertToBetaTriggers(orig *v1alpha1.TriggerWhen) 
TriggerWhen { func getAlphaBuildSource(src BuildSpec) v1alpha1.Source { source := v1alpha1.Source{} + + if src.Source == nil { + return source + } + var credentials corev1.LocalObjectReference var revision *string @@ -428,14 +437,14 @@ func getAlphaBuildSource(src BuildSpec) v1alpha1.Source { Prune: (*v1alpha1.PruneOption)(src.Source.OCIArtifact.Prune), } default: - if src.Source.GitSource != nil && src.Source.GitSource.CloneSecret != nil { + if src.Source.Git != nil && src.Source.Git.CloneSecret != nil { credentials = corev1.LocalObjectReference{ - Name: *src.Source.GitSource.CloneSecret, + Name: *src.Source.Git.CloneSecret, } } - if src.Source.GitSource != nil { - source.URL = src.Source.GitSource.URL - revision = src.Source.GitSource.Revision + if src.Source.Git != nil { + source.URL = &src.Source.Git.URL + revision = src.Source.Git.Revision } } diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/build_types.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/build_types.go index 738fa05e..39a05e6c 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/build_types.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/build_types.go @@ -16,6 +16,8 @@ type BuildReason string const ( // SucceedStatus indicates that all validations Succeeded SucceedStatus BuildReason = "Succeeded" + // UnknownBuildStrategyKind indicates that neither namespace-scope or cluster-scope strategy kind was used + UnknownBuildStrategyKind BuildReason = "UnknownBuildStrategyKind" // BuildStrategyNotFound indicates that a namespaced-scope strategy was not found in the namespace BuildStrategyNotFound BuildReason = "BuildStrategyNotFound" // ClusterBuildStrategyNotFound indicates that a cluster-scope strategy was not found @@ -70,6 +72,10 @@ const ( TriggerInvalidImage BuildReason = "TriggerInvalidImage" // TriggerInvalidPipeline indicates the trigger type Pipeline is invalid TriggerInvalidPipeline BuildReason = "TriggerInvalidPipeline" + // OutputTimestampNotSupported indicates that an unsupported output timestamp setting was used + OutputTimestampNotSupported BuildReason = "OutputTimestampNotSupported" + // OutputTimestampNotValid indicates that the output timestamp value is not valid + OutputTimestampNotValid BuildReason = "OutputTimestampNotValid" // AllValidationsSucceeded indicates a Build was successfully validated AllValidationsSucceeded = "all validations succeeded" @@ -105,12 +111,25 @@ const ( AnnotationBuildVerifyRepository = BuildDomain + "/verify.repository" ) +const ( + // OutputImageZeroTimestamp indicates that the UNIX timestamp 0 is to be used + OutputImageZeroTimestamp = "Zero" + + // OutputImageSourceTimestamp indicates that the timestamp of the respective source it to be used + OutputImageSourceTimestamp = "SourceTimestamp" + + // OutputImageBuildTimestamp indicates that the current timestamp of the build run itself is to be used + OutputImageBuildTimestamp = "BuildTimestamp" +) + // BuildSpec defines the desired state of Build type BuildSpec struct { // Source refers to the location where the source code is, // this could be a git repository, a local source or an oci // artifact - Source Source `json:"source"` + // + // +optional + Source *Source `json:"source"` // Trigger defines the scenarios where a new build should be triggered. 
// @@ -137,6 +156,7 @@ type BuildSpec struct { Timeout *metav1.Duration `json:"timeout,omitempty"` // Env contains additional environment variables that should be passed to the build container + // // +optional Env []corev1.EnvVar `json:"env,omitempty"` @@ -147,6 +167,7 @@ type BuildSpec struct { // Volumes contains volume Overrides of the BuildStrategy volumes in case those are allowed // to be overridden. Must only contain volumes that exist in the corresponding BuildStrategy + // // +optional Volumes []BuildVolume `json:"volumes,omitempty"` } @@ -196,6 +217,16 @@ type Image struct { // // +optional Labels map[string]string `json:"labels,omitempty"` + + // Timestamp references the optional image timestamp to be set, valid values are: + // - "Zero", to set 00:00:00 UTC on 1 January 1970 + // - "SourceTimestamp", to set the source timestamp dereived from the input source + // - "BuildTimestamp", to set the timestamp of the current build itself + // - Parsable integer number defined as the epoch seconds + // - or nil/empty to not set any specific timestamp + // + // +optional + Timestamp *string `json:"timestamp,omitempty"` } // BuildStatus defines the observed state of Build @@ -220,6 +251,7 @@ type BuildStatus struct { // Build is the Schema representing a Build definition // +kubebuilder:subresource:status +// +kubebuilder:storageversion // +kubebuilder:resource:path=builds,scope=Namespaced // +kubebuilder:printcolumn:name="Registered",type="string",JSONPath=".status.registered",description="The register status of the Build" // +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.reason",description="The reason of the registered Build, either an error or succeed message" @@ -274,3 +306,22 @@ type BuildRetention struct { func init() { SchemeBuilder.Register(&Build{}, &BuildList{}) } + +// GetSourceCredentials returns the secret name for a Build Source +func (b Build) GetSourceCredentials() *string { + if b.Spec.Source == nil { + return nil + } + + switch b.Spec.Source.Type { + case OCIArtifactType: + if b.Spec.Source.OCIArtifact != nil && b.Spec.Source.OCIArtifact.PullSecret != nil { + return b.Spec.Source.OCIArtifact.PullSecret + } + default: + if b.Spec.Source.Git != nil && b.Spec.Source.Git.CloneSecret != nil { + return b.Spec.Source.Git.CloneSecret + } + } + return nil +} diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildrun_conversion.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildrun_conversion.go index 41180e9a..7d0c2b6c 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildrun_conversion.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildrun_conversion.go @@ -21,7 +21,7 @@ var _ webhook.Conversion = (*BuildRun)(nil) // To Alpha func (src *BuildRun) ConvertTo(ctx context.Context, obj *unstructured.Unstructured) error { - ctxlog.Debug(ctx, "Converting BuildRun from beta to alpha", "namespace", src.Namespace, "name", src.Name) + ctxlog.Info(ctx, "converting BuildRun from beta to alpha", "namespace", src.Namespace, "name", src.Name) var alphaBuildRun v1alpha1.BuildRun @@ -30,9 +30,9 @@ func (src *BuildRun) ConvertTo(ctx context.Context, obj *unstructured.Unstructur alphaBuildRun.ObjectMeta = src.ObjectMeta // BuildRunSpec BuildSpec - if src.Spec.Build.Build != nil { + if src.Spec.Build.Spec != nil { newBuildSpec := v1alpha1.BuildSpec{} - if err := src.Spec.Build.Build.ConvertTo(&newBuildSpec); err != nil { + if err := src.Spec.Build.Spec.ConvertTo(&newBuildSpec); err 
!= nil { return err } alphaBuildRun.Spec.BuildSpec = &newBuildSpec @@ -43,11 +43,11 @@ func (src *BuildRun) ConvertTo(ctx context.Context, obj *unstructured.Unstructur } // BuildRunSpec Sources - if src.Spec.Source != nil && src.Spec.Source.Type == LocalType && src.Spec.Source.LocalSource != nil { + if src.Spec.Source != nil && src.Spec.Source.Type == LocalType && src.Spec.Source.Local != nil { alphaBuildRun.Spec.Sources = append(alphaBuildRun.Spec.Sources, v1alpha1.BuildSource{ - Name: src.Spec.Source.LocalSource.Name, + Name: src.Spec.Source.Local.Name, Type: v1alpha1.LocalCopy, - Timeout: src.Spec.Source.LocalSource.Timeout, + Timeout: src.Spec.Source.Local.Timeout, }) } @@ -108,6 +108,71 @@ func (src *BuildRun) ConvertTo(ctx context.Context, obj *unstructured.Unstructur }) } + // BuildRun Status + var sourceStatus []v1alpha1.SourceResult + if src.Status.Source != nil && src.Status.Source.Git != nil { + // Note: v1alpha contains a Name field under the SourceResult + // object, which we dont set here. + sourceStatus = append(sourceStatus, v1alpha1.SourceResult{ + Name: "default", + Git: (*v1alpha1.GitSourceResult)(src.Status.Source.Git), + Timestamp: src.Status.Source.Timestamp, + }) + } + + if src.Status.Source != nil && src.Status.Source.OciArtifact != nil { + // Note: v1alpha contains a Name field under the SourceResult + // object, which we dont set here. + sourceStatus = append(sourceStatus, v1alpha1.SourceResult{ + Name: "default", + Bundle: (*v1alpha1.BundleSourceResult)(src.Status.Source.OciArtifact), + Timestamp: src.Status.Source.Timestamp, + }) + } + + var conditions []v1alpha1.Condition + for _, c := range src.Status.Conditions { + ct := v1alpha1.Condition{ + Type: v1alpha1.Type(c.Type), + Status: c.Status, + LastTransitionTime: c.LastTransitionTime, + Reason: c.Reason, + Message: c.Message, + } + conditions = append(conditions, ct) + } + + alphaBuildRun.Status = v1alpha1.BuildRunStatus{ + Sources: sourceStatus, + Output: (*v1alpha1.Output)(src.Status.Output), + Conditions: conditions, + LatestTaskRunRef: src.Status.TaskRunName, + StartTime: src.Status.StartTime, + CompletionTime: src.Status.CompletionTime, + } + + if src.Status.FailureDetails != nil { + alphaBuildRun.Status.FailureDetails = &v1alpha1.FailureDetails{ + Reason: src.Status.FailureDetails.Reason, + Message: src.Status.FailureDetails.Message, + } + } + + if src.Status.FailureDetails != nil && src.Status.FailureDetails.Location != nil { + alphaBuildRun.Status.FailureDetails.Location = &v1alpha1.FailedAt{ + Pod: src.Status.FailureDetails.Location.Pod, + Container: src.Status.FailureDetails.Location.Container, + } + //nolint:staticcheck // SA1019 we want to give users some time to adopt to failureDetails + alphaBuildRun.Status.FailedAt = alphaBuildRun.Status.FailureDetails.Location + } + + aux := &v1alpha1.BuildSpec{} + if src.Status.BuildSpec != nil { + src.Status.BuildSpec.ConvertTo(aux) + alphaBuildRun.Status.BuildSpec = aux + } + mapito, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&alphaBuildRun) if err != nil { ctxlog.Error(ctx, err, "failed structuring the newObject") @@ -129,7 +194,7 @@ func (src *BuildRun) ConvertFrom(ctx context.Context, obj *unstructured.Unstruct ctxlog.Error(ctx, err, "failed unstructuring the buildrun convertedObject") } - ctxlog.Debug(ctx, "Converting BuildRun from alpha to beta", "namespace", alphaBuildRun.Namespace, "name", alphaBuildRun.Name) + ctxlog.Info(ctx, "converting BuildRun from alpha to beta", "namespace", alphaBuildRun.Namespace, "name", alphaBuildRun.Name) 
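From the Go side, the v1beta1 shape above looks like this: Source is now an optional pointer, Git carries a required URL string, and Output gains the optional Timestamp. Only fields visible in this diff are guaranteed here; the Image field on the output and the Build wrapper's Spec field are assumed from the surrounding API, and the rest of the spec (strategy, metadata) is elided.

package main

import (
    "fmt"

    buildv1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1"
)

func main() {
    contextDir := "docker-build"
    sourceTimestamp := buildv1beta1.OutputImageSourceTimestamp

    spec := buildv1beta1.BuildSpec{
        Source: &buildv1beta1.Source{
            Type:       buildv1beta1.GitType,
            ContextDir: &contextDir,
            Git: &buildv1beta1.Git{
                URL: "https://github.com/shipwright-io/sample-go",
            },
        },
        Output: buildv1beta1.Image{
            // Image (assumed field name) is the push destination; Timestamp
            // takes one of the documented values: Zero, SourceTimestamp,
            // BuildTimestamp, or epoch seconds as a string.
            Image:     "registry.example.com/sample-go:latest",
            Timestamp: &sourceTimestamp,
        },
    }

    b := buildv1beta1.Build{Spec: spec}
    // GetSourceCredentials is nil here because neither CloneSecret nor
    // PullSecret is set on the source.
    fmt.Println(b.GetSourceCredentials(), spec.Source.Git.URL, *spec.Output.Timestamp)
}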
src.ObjectMeta = alphaBuildRun.ObjectMeta src.TypeMeta = alphaBuildRun.TypeMeta @@ -142,6 +207,7 @@ func (src *BuildRun) ConvertFrom(ctx context.Context, obj *unstructured.Unstruct sourceStatus = &SourceResult{ Git: (*GitSourceResult)(s.Git), OciArtifact: (*OciArtifactSourceResult)(s.Bundle), + Timestamp: s.Timestamp, } } @@ -189,8 +255,8 @@ func (dest *BuildRunSpec) ConvertFrom(orig *v1alpha1.BuildRunSpec) error { // BuildRunSpec BuildSpec if orig.BuildSpec != nil { - dest.Build.Build = &BuildSpec{} - dest.Build.Build.ConvertFrom(orig.BuildSpec) + dest.Build.Spec = &BuildSpec{} + dest.Build.Spec.ConvertFrom(orig.BuildSpec) } if orig.BuildRef != nil { dest.Build.Name = &orig.BuildRef.Name @@ -202,7 +268,7 @@ func (dest *BuildRunSpec) ConvertFrom(orig *v1alpha1.BuildRunSpec) error { if isLocal { dest.Source = &BuildRunSource{ Type: LocalType, - LocalSource: &Local{ + Local: &Local{ Name: orig.Sources[index].Name, Timeout: orig.Sources[index].Timeout, }, @@ -211,6 +277,9 @@ func (dest *BuildRunSpec) ConvertFrom(orig *v1alpha1.BuildRunSpec) error { if orig.ServiceAccount != nil { dest.ServiceAccount = orig.ServiceAccount.Name + if orig.ServiceAccount.Generate != nil && *orig.ServiceAccount.Generate { + dest.ServiceAccount = pointer.String(".generate") + } } dest.Timeout = orig.Timeout diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildrun_types.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildrun_types.go index 7a40e822..428da6c3 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildrun_types.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildrun_types.go @@ -21,10 +21,10 @@ const ( ) type ReferencedBuild struct { - // Build refers to an embedded build specification + // Spec refers to an embedded build specification // // +optional - Build *BuildSpec `json:"spec,omitempty"` + Spec *BuildSpec `json:"spec,omitempty"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names // @@ -122,6 +122,14 @@ type SourceResult struct { // // +optional OciArtifact *OciArtifactSourceResult `json:"ociArtifact,omitempty"` + + // Timestamp holds the timestamp of the source, which + // depends on the actual source type and could range from + // being the commit timestamp or the fileystem timestamp + // of the most recent source file in the working directory + // + // +optional + Timestamp *metav1.Time `json:"timestamp,omitempty"` } // OciArtifactSourceResult holds the results emitted from the bundle source @@ -213,6 +221,7 @@ type FailureDetails struct { // BuildRun is the Schema representing an instance of build execution // +kubebuilder:subresource:status +// +kubebuilder:storageversion // +kubebuilder:resource:path=buildruns,scope=Namespaced,shortName=br;brs // +kubebuilder:printcolumn:name="Succeeded",type="string",JSONPath=".status.conditions[?(@.type==\"Succeeded\")].status",description="The Succeeded status of the BuildRun" // +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.conditions[?(@.type==\"Succeeded\")].reason",description="The Succeeded reason of the BuildRun" diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildstrategy_conversion.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildstrategy_conversion.go index 1b0715fa..7c546d50 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildstrategy_conversion.go +++ 
b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildstrategy_conversion.go @@ -22,7 +22,7 @@ var _ webhook.Conversion = (*BuildStrategy)(nil) // ConvertTo converts this object to its v1alpha1 equivalent func (src *BuildStrategy) ConvertTo(ctx context.Context, obj *unstructured.Unstructured) error { - ctxlog.Debug(ctx, "Converting BuildStrategy from beta to alpha", "namespace", src.Namespace, "name", src.Name) + ctxlog.Info(ctx, "converting BuildStrategy from beta to alpha", "namespace", src.Namespace, "name", src.Name) var bs v1alpha1.BuildStrategy bs.TypeMeta = src.TypeMeta @@ -154,7 +154,7 @@ func (src *BuildStrategy) ConvertFrom(ctx context.Context, obj *unstructured.Uns ctxlog.Error(ctx, err, "failed unstructuring the buildrun convertedObject") } - ctxlog.Debug(ctx, "Converting BuildStrategy from alpha to beta", "namespace", bs.Namespace, "name", bs.Name) + ctxlog.Info(ctx, "converting BuildStrategy from alpha to beta", "namespace", bs.Namespace, "name", bs.Name) src.ObjectMeta = bs.ObjectMeta src.TypeMeta = bs.TypeMeta diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildstrategy_types.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildstrategy_types.go index 2d1ece05..1613164c 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildstrategy_types.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/buildstrategy_types.go @@ -26,6 +26,7 @@ const ( // BuildStrategy is the Schema representing a strategy in the namespace scope to build images from source code. // +kubebuilder:subresource:status +// +kubebuilder:storageversion // +kubebuilder:resource:path=buildstrategies,scope=Namespaced,shortName=bs;bss type BuildStrategy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/clusterbuildstrategy_conversion.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/clusterbuildstrategy_conversion.go index 6a15007f..67470a30 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/clusterbuildstrategy_conversion.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/clusterbuildstrategy_conversion.go @@ -19,7 +19,7 @@ var _ webhook.Conversion = (*ClusterBuildStrategy)(nil) // ConvertTo converts this object to its v1alpha1 equivalent func (src *ClusterBuildStrategy) ConvertTo(ctx context.Context, obj *unstructured.Unstructured) error { - ctxlog.Debug(ctx, "Converting ClusterBuildStrategy from beta to alpha", "namespace", src.Namespace, "name", src.Name) + ctxlog.Info(ctx, "converting ClusterBuildStrategy from beta to alpha", "namespace", src.Namespace, "name", src.Name) var bs v1alpha1.ClusterBuildStrategy bs.TypeMeta = src.TypeMeta @@ -47,7 +47,7 @@ func (src *ClusterBuildStrategy) ConvertFrom(ctx context.Context, obj *unstructu ctxlog.Error(ctx, err, "failed unstructuring the buildrun convertedObject") } - ctxlog.Debug(ctx, "Converting ClusterBuildStrategy from alpha to beta", "namespace", cbs.Namespace, "name", cbs.Name) + ctxlog.Info(ctx, "converting ClusterBuildStrategy from alpha to beta", "namespace", cbs.Namespace, "name", cbs.Name) src.ObjectMeta = cbs.ObjectMeta src.TypeMeta = cbs.TypeMeta diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/clusterbuildstrategy_types.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/clusterbuildstrategy_types.go index 42df5846..c6c612c3 100644 --- 
a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/clusterbuildstrategy_types.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/clusterbuildstrategy_types.go @@ -27,6 +27,7 @@ const ( // ClusterBuildStrategy is the Schema representing a strategy in the cluster scope to build images from source code. // +kubebuilder:subresource:status +// +kubebuilder:storageversion // +kubebuilder:resource:path=clusterbuildstrategies,scope=Cluster,shortName=cbs;cbss type ClusterBuildStrategy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/source.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/source.go index dad73167..8187d532 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/source.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/source.go @@ -44,9 +44,7 @@ type Local struct { // Git describes the git repository to pull type Git struct { // URL describes the URL of the Git repository. - // - // +optional - URL *string `json:"url,omitempty"` + URL string `json:"url"` // Revision describes the Git revision (e.g., branch, tag, commit SHA, // etc.) to fetch. @@ -76,6 +74,7 @@ type OCIArtifact struct { // // +optional Prune *PruneOption `json:"prune,omitempty"` + // PullSecret references a Secret that contains credentials to access // the repository. // @@ -83,42 +82,41 @@ type OCIArtifact struct { PullSecret *string `json:"pullSecret,omitempty"` } -// Source describes the Git source repository to fetch. +// Source describes the build source type to fetch. type Source struct { - // Type is the BuildSource qualifier, the type of the data-source. - // - // +optional - Type BuildSourceType `json:"type,omitempty"` + // Type is the BuildSource qualifier, the type of the source. + Type BuildSourceType `json:"type"` // ContextDir is a path to subfolder in the repo. Optional. // // +optional ContextDir *string `json:"contextDir,omitempty"` - // OCIArtifact + // OCIArtifact contains the details for the source of type OCIArtifact // // +optional OCIArtifact *OCIArtifact `json:"ociArtifact,omitempty"` - // GitSource + // Git contains the details for the source of type Git // // +optional - GitSource *Git `json:"git,omitempty"` + Git *Git `json:"git,omitempty"` - // LocalSource + // Local contains the details for the source of type Local // // +optional - LocalSource *Local `json:"local,omitempty"` + Local *Local `json:"local,omitempty"` } // BuildRunSource describes the local source to use type BuildRunSource struct { - // Type is the BuildRunSource qualifier, the type of the data-source. - // Only LocalType is supported. + // Type is the BuildRunSource qualifier, the type of the source. + // Only Local is supported. 
// - // +optional - Type BuildSourceType `json:"type,omitempty"` - // LocalSource + Type BuildSourceType `json:"type"` + + // Local contains the details for the source of type Local // - LocalSource *Local `json:"local,omitempty"` + // +optional + Local *Local `json:"local,omitempty"` } diff --git a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/zz_generated.deepcopy.go index 4c3dc1df..9c1ce78f 100644 --- a/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/shipwright-io/build/pkg/apis/build/v1beta1/zz_generated.deepcopy.go @@ -207,8 +207,8 @@ func (in *BuildRunRetention) DeepCopy() *BuildRunRetention { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildRunSource) DeepCopyInto(out *BuildRunSource) { *out = *in - if in.LocalSource != nil { - in, out := &in.LocalSource, &out.LocalSource + if in.Local != nil { + in, out := &in.Local, &out.Local *out = new(Local) (*in).DeepCopyInto(*out) } @@ -352,7 +352,11 @@ func (in *BuildRunStatus) DeepCopy() *BuildRunStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { *out = *in - in.Source.DeepCopyInto(&out.Source) + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(Source) + (*in).DeepCopyInto(*out) + } if in.Trigger != nil { in, out := &in.Trigger, &out.Trigger *out = new(Trigger) @@ -738,11 +742,6 @@ func (in *FailureDetails) DeepCopy() *FailureDetails { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Git) DeepCopyInto(out *Git) { *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } if in.Revision != nil { in, out := &in.Revision, &out.Revision *out = new(string) @@ -809,6 +808,11 @@ func (in *Image) DeepCopyInto(out *Image) { (*out)[key] = val } } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(string) + **out = **in + } return } @@ -999,8 +1003,8 @@ func (in *Parameter) DeepCopy() *Parameter { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ReferencedBuild) DeepCopyInto(out *ReferencedBuild) { *out = *in - if in.Build != nil { - in, out := &in.Build, &out.Build + if in.Spec != nil { + in, out := &in.Spec, &out.Spec *out = new(BuildSpec) (*in).DeepCopyInto(*out) } @@ -1066,13 +1070,13 @@ func (in *Source) DeepCopyInto(out *Source) { *out = new(OCIArtifact) (*in).DeepCopyInto(*out) } - if in.GitSource != nil { - in, out := &in.GitSource, &out.GitSource + if in.Git != nil { + in, out := &in.Git, &out.Git *out = new(Git) (*in).DeepCopyInto(*out) } - if in.LocalSource != nil { - in, out := &in.LocalSource, &out.LocalSource + if in.Local != nil { + in, out := &in.Local, &out.Local *out = new(Local) (*in).DeepCopyInto(*out) } @@ -1102,6 +1106,10 @@ func (in *SourceResult) DeepCopyInto(out *SourceResult) { *out = new(OciArtifactSourceResult) **out = **in } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = (*in).DeepCopy() + } return } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go deleted file mode 100644 index ed1e1b2e..00000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "context" -) - -// isSubstituted is used for associating the parameter substitution inside the context.Context. -type isSubstituted struct{} - -// validatePropagatedVariables is used for deciding whether to validate or skip parameters and workspaces inside the contect.Context. -type validatePropagatedVariables string - -// WithinSubstituted is used to note that it is calling within -// the context of a substitute variable operation. -func WithinSubstituted(ctx context.Context) context.Context { - return context.WithValue(ctx, isSubstituted{}, true) -} - -// IsSubstituted indicates that the variables have been substituted. -func IsSubstituted(ctx context.Context) bool { - return ctx.Value(isSubstituted{}) != nil -} - -// SkipValidationDueToPropagatedParametersAndWorkspaces sets the context to skip validation of parameters when embedded vs referenced to true or false. -func SkipValidationDueToPropagatedParametersAndWorkspaces(ctx context.Context, skip bool) context.Context { - return context.WithValue(ctx, validatePropagatedVariables("ValidatePropagatedParameterVariablesAndWorkspaces"), !skip) -} - -// ValidateParameterVariablesAndWorkspaces indicates if validation of paramater variables and workspaces should be conducted. 
-func ValidateParameterVariablesAndWorkspaces(ctx context.Context) bool { - return ctx.Value(validatePropagatedVariables("ValidatePropagatedParameterVariablesAndWorkspaces")) == true -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go index 968dae25..b17f891b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go @@ -59,6 +59,9 @@ const ( defaultResolverTypeKey = "default-resolver-type" ) +// DefaultConfig holds all the default configurations for the config. +var DefaultConfig, _ = NewDefaultsFromMap(map[string]string{}) + // Defaults holds the default configurations // +k8s:deepcopy-gen=true type Defaults struct { @@ -67,7 +70,7 @@ type Defaults struct { DefaultManagedByLabelValue string DefaultPodTemplate *pod.Template DefaultAAPodTemplate *pod.AffinityAssistantTemplate - DefaultCloudEventsSink string + DefaultCloudEventsSink string // Deprecated. Use the events package instead DefaultTaskRunWorkspaceBinding string DefaultMaxMatrixCombinationsCount int DefaultForbiddenEnv []string diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/events.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/events.go new file mode 100644 index 00000000..fca87c4a --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/events.go @@ -0,0 +1,185 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "os" + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +const ( + // FormatTektonV1 represents the "v1" events in Tekton custom format + FormatTektonV1 EventFormat = "tektonv1" + + // DefaultSink is the default value for "sink" + DefaultSink = "" + + formatsKey = "formats" + sinkKey = "sink" +) + +var ( + // TODO(afrittoli): only one valid format for now, more to come + // See TEP-0137 https://github.com/tektoncd/community/pull/1028 + validFormats = EventFormats{FormatTektonV1: struct{}{}} + + // DefaultFormat is the default value for "formats" + DefaultFormats = EventFormats{FormatTektonV1: struct{}{}} + + // DefaultConfig holds all the default configurations for the config. 
+ DefaultEvents, _ = NewEventsFromMap(map[string]string{}) +) + +// Events holds the events configurations +// +k8s:deepcopy-gen=true +type Events struct { + Sink string + Formats EventFormats +} + +// EventFormat is a single event format +type EventFormat string + +// EventFormats is a set of event formats +type EventFormats map[EventFormat]struct{} + +// String is a string representation of an EventFormat +func (ef EventFormat) String() string { + return string(ef) +} + +// IsValid returns true is the EventFormat one of the valid ones +func (ef EventFormat) IsValid() bool { + _, ok := validFormats[ef] + return ok +} + +// String is a string representation of an EventFormats +func (efs EventFormats) String() string { + // Make an array of map keys + keys := make([]string, len(efs)) + + i := 0 + for k := range efs { + keys[i] = k.String() + i++ + } + // Sorting helps with testing + sort.Strings(keys) + + // Build a comma separated list + return strings.Join(keys, ",") +} + +// Equals defines identity between EventFormats +func (efs EventFormats) Equals(other EventFormats) bool { + if len(efs) != len(other) { + return false + } + for key := range efs { + if _, ok := other[key]; !ok { + return false + } + } + return true +} + +// ParseEventFormats converts a comma separated list into a EventFormats set +func ParseEventFormats(formats string) (EventFormats, error) { + // An empty string is not a valid configuration + if formats == "" { + return EventFormats{}, fmt.Errorf("formats cannot be empty") + } + stringFormats := strings.Split(formats, ",") + var eventFormats EventFormats = make(map[EventFormat]struct{}, len(stringFormats)) + for _, format := range stringFormats { + if !EventFormat(format).IsValid() { + return EventFormats{}, fmt.Errorf("invalid format: %s", format) + } + // If already in the map (duplicate), fail + if _, ok := eventFormats[EventFormat(format)]; ok { + return EventFormats{}, fmt.Errorf("duplicate format: %s", format) + } + eventFormats[EventFormat(format)] = struct{}{} + } + return eventFormats, nil +} + +// GetEventsConfigName returns the name of the configmap containing all +// feature flags. 
+func GetEventsConfigName() string { + if e := os.Getenv("CONFIG_EVENTS_NAME"); e != "" { + return e + } + return "config-events" +} + +// NewEventsFromMap returns a Config given a map corresponding to a ConfigMap +func NewEventsFromMap(cfgMap map[string]string) (*Events, error) { + // for any string field with no extra validation + setField := func(key string, defaultValue string, field *string) { + if cfg, ok := cfgMap[key]; ok { + *field = cfg + } else { + *field = defaultValue + } + } + + events := Events{} + err := setFormats(cfgMap, DefaultFormats, &events.Formats) + if err != nil { + return nil, err + } + setField(sinkKey, DefaultSink, &events.Sink) + return &events, nil +} + +func setFormats(cfgMap map[string]string, defaultValue EventFormats, field *EventFormats) error { + value := defaultValue + if cfg, ok := cfgMap[formatsKey]; ok { + v, err := ParseEventFormats(cfg) + if err != nil { + return err + } + value = v + } + *field = value + return nil +} + +// NewEventsFromConfigMap returns a Config for the given configmap +func NewEventsFromConfigMap(config *corev1.ConfigMap) (*Events, error) { + return NewEventsFromMap(config.Data) +} + +// Equals returns true if two Configs are identical +func (cfg *Events) Equals(other *Events) bool { + if cfg == nil && other == nil { + return true + } + + if cfg == nil || other == nil { + return false + } + + return other.Sink == cfg.Sink && + other.Formats.Equals(cfg.Formats) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go index 699a6551..8e0054da 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go @@ -42,6 +42,14 @@ const ( // IgnoreNoMatchPolicy is the value used for "trusted-resources-verification-no-match-policy" to skip verification // when no matching policies are found IgnoreNoMatchPolicy = "ignore" + // CoscheduleWorkspaces is the value used for "coschedule" to coschedule PipelineRun Pods sharing the same PVC workspaces to the same node + CoscheduleWorkspaces = "workspaces" + // CoschedulePipelineRuns is the value used for "coschedule" to coschedule all PipelineRun Pods to the same node + CoschedulePipelineRuns = "pipelineruns" + // CoscheduleIsolatePipelineRun is the value used for "coschedule" to coschedule all PipelineRun Pods to the same node, and only allows one PipelineRun to run on a node at a time + CoscheduleIsolatePipelineRun = "isolate-pipelinerun" + // CoscheduleDisabled is the value used for "coschedule" to disabled PipelineRun Pods coschedule + CoscheduleDisabled = "disabled" // ResultExtractionMethodTerminationMessage is the value used for "results-from" as a way to extract results from tasks using kubernetes termination message. ResultExtractionMethodTerminationMessage = "termination-message" // ResultExtractionMethodSidecarLogs is the value used for "results-from" as a way to extract results from tasks using sidecar logs. @@ -59,7 +67,7 @@ const ( // DefaultEnableTektonOciBundles is the default value for "enable-tekton-oci-bundles". DefaultEnableTektonOciBundles = false // DefaultEnableAPIFields is the default value for "enable-api-fields". - DefaultEnableAPIFields = StableAPIFields + DefaultEnableAPIFields = BetaAPIFields // DefaultSendCloudEventsForRuns is the default value for "send-cloudevents-for-runs". 
DefaultSendCloudEventsForRuns = false // EnforceNonfalsifiabilityWithSpire is the value used for "enable-nonfalsifiability" when SPIRE is used to enable non-falsifiability. @@ -71,11 +79,15 @@ const ( // DefaultNoMatchPolicyConfig is the default value for "trusted-resources-verification-no-match-policy". DefaultNoMatchPolicyConfig = IgnoreNoMatchPolicy // DefaultEnableProvenanceInStatus is the default value for "enable-provenance-status". - DefaultEnableProvenanceInStatus = false + DefaultEnableProvenanceInStatus = true // DefaultResultExtractionMethod is the default value for ResultExtractionMethod DefaultResultExtractionMethod = ResultExtractionMethodTerminationMessage // DefaultMaxResultSize is the default value in bytes for the size of a result DefaultMaxResultSize = 4096 + // DefaultSetSecurityContext is the default value for "set-security-context" + DefaultSetSecurityContext = false + // DefaultCoschedule is the default value for coschedule + DefaultCoschedule = CoscheduleWorkspaces disableAffinityAssistantKey = "disable-affinity-assistant" disableCredsInitKey = "disable-creds-init" @@ -90,8 +102,13 @@ const ( enableProvenanceInStatus = "enable-provenance-in-status" resultExtractionMethod = "results-from" maxResultSize = "max-result-size" + setSecurityContextKey = "set-security-context" + coscheduleKey = "coschedule" ) +// DefaultFeatureFlags holds all the default configurations for the feature flags configmap. +var DefaultFeatureFlags, _ = NewFeatureFlagsFromMap(map[string]string{}) + // FeatureFlags holds the features configurations // +k8s:deepcopy-gen=true // @@ -116,6 +133,8 @@ type FeatureFlags struct { EnableProvenanceInStatus bool ResultExtractionMethod string MaxResultSize int + SetSecurityContext bool + Coschedule string } // GetFeatureFlagsConfigName returns the name of the configmap containing all @@ -179,7 +198,13 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setEnforceNonFalsifiability(cfgMap, tc.EnableAPIFields, &tc.EnforceNonfalsifiability); err != nil { return nil, err } + if err := setFeature(setSecurityContextKey, DefaultSetSecurityContext, &tc.SetSecurityContext); err != nil { + return nil, err + } + if err := setCoschedule(cfgMap, DefaultCoschedule, tc.DisableAffinityAssistant, &tc.Coschedule); err != nil { + return nil, err + } // Given that they are alpha features, Tekton Bundles and Custom Tasks should be switched on if // enable-api-fields is "alpha". If enable-api-fields is not "alpha" then fall back to the value of // each feature's individual flag. @@ -212,6 +237,29 @@ func setEnabledAPIFields(cfgMap map[string]string, defaultValue string, feature return nil } +// setCoschedule sets the "coschedule" flag based on the content of a given map. +// If the feature gate is invalid or incompatible with `disable-affinity-assistant`, then an error is returned. 
+func setCoschedule(cfgMap map[string]string, defaultValue string, disabledAffinityAssistant bool, feature *string) error { + value := defaultValue + if cfg, ok := cfgMap[coscheduleKey]; ok { + value = strings.ToLower(cfg) + } + + switch value { + case CoscheduleDisabled, CoscheduleWorkspaces, CoschedulePipelineRuns, CoscheduleIsolatePipelineRun: + // validate that "coschedule" is compatible with "disable-affinity-assistant" + // "coschedule" must be set to "workspaces" when "disable-affinity-assistant" is false + if !disabledAffinityAssistant && value != CoscheduleWorkspaces { + return fmt.Errorf("coschedule value %v is incompatible with %v setting to false", value, disableAffinityAssistantKey) + } + *feature = value + default: + return fmt.Errorf("invalid value for feature flag %q: %q", coscheduleKey, value) + } + + return nil +} + // setEnforceNonFalsifiability sets the "enforce-nonfalsifiability" flag based on the content of a given map. // If the feature gate is invalid, then an error is returned. func setEnforceNonFalsifiability(cfgMap map[string]string, enableAPIFields string, feature *string) error { @@ -223,22 +271,11 @@ func setEnforceNonFalsifiability(cfgMap map[string]string, enableAPIFields strin // validate that "enforce-nonfalsifiability" is set to a valid value switch value { case EnforceNonfalsifiabilityNone, EnforceNonfalsifiabilityWithSpire: - break - default: - return fmt.Errorf("invalid value for feature flag %q: %q", enforceNonfalsifiability, value) - } - - // validate that "enforce-nonfalsifiability" is set to allowed values for stability level - switch enableAPIFields { - case AlphaAPIFields: *feature = value + return nil default: - // Do not consider any form of non-falsifiability enforcement in non-alpha mode - if value != DefaultEnforceNonfalsifiability { - return fmt.Errorf("%q can be set to non-default values (%q) only in alpha", enforceNonfalsifiability, value) - } + return fmt.Errorf("invalid value for feature flag %q: %q", enforceNonfalsifiability, value) } - return nil } // setResultExtractionMethod sets the "results-from" flag based on the content of a given map. 
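// --- Editorial sketch, not part of the vendored patch ---
// A minimal, hypothetical example of parsing the configuration surfaces added
// above: the new "coschedule" and "set-security-context" feature flags and the
// new events config. Keys match the constants in this diff; the sink URL and
// flag values are placeholders.
package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/config"
)

func main() {
	// Per setCoschedule above, "coschedule" values other than "workspaces"
	// are only accepted when "disable-affinity-assistant" is true.
	ff, err := config.NewFeatureFlagsFromMap(map[string]string{
		"disable-affinity-assistant": "true",
		"coschedule":                 "pipelineruns",
		"set-security-context":       "true",
	})
	if err != nil {
		fmt.Println("invalid feature flags:", err)
		return
	}
	fmt.Println(ff.Coschedule, ff.SetSecurityContext)

	// The events config accepts a comma-separated "formats" list; "tektonv1"
	// is the only format accepted by ParseEventFormats in this version.
	ev, err := config.NewEventsFromMap(map[string]string{
		"formats": "tektonv1",
		"sink":    "https://events.example.com",
	})
	if err != nil {
		fmt.Println("invalid events config:", err)
		return
	}
	fmt.Println(ev.Sink, ev.Formats.String())
}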
@@ -297,46 +334,12 @@ func NewFeatureFlagsFromConfigMap(config *corev1.ConfigMap) (*FeatureFlags, erro return NewFeatureFlagsFromMap(config.Data) } -// EnableAlphaAPIFields enables alpha features in an existing context (for use in testing) -func EnableAlphaAPIFields(ctx context.Context) context.Context { - return setEnableAPIFields(ctx, AlphaAPIFields) -} - -// EnableBetaAPIFields enables beta features in an existing context (for use in testing) -func EnableBetaAPIFields(ctx context.Context) context.Context { - return setEnableAPIFields(ctx, BetaAPIFields) -} - -// EnableStableAPIFields enables stable features in an existing context (for use in testing) -func EnableStableAPIFields(ctx context.Context) context.Context { - return setEnableAPIFields(ctx, StableAPIFields) -} - // GetVerificationNoMatchPolicy returns the "trusted-resources-verification-no-match-policy" value func GetVerificationNoMatchPolicy(ctx context.Context) string { return FromContextOrDefaults(ctx).FeatureFlags.VerificationNoMatchPolicy } -// CheckAlphaOrBetaAPIFields return true if the enable-api-fields is either set to alpha or set to beta -func CheckAlphaOrBetaAPIFields(ctx context.Context) bool { - cfg := FromContextOrDefaults(ctx) - return cfg.FeatureFlags.EnableAPIFields == AlphaAPIFields || cfg.FeatureFlags.EnableAPIFields == BetaAPIFields -} - // IsSpireEnabled checks if non-falsifiable provenance is enforced through SPIRE func IsSpireEnabled(ctx context.Context) bool { return FromContextOrDefaults(ctx).FeatureFlags.EnforceNonfalsifiability == EnforceNonfalsifiabilityWithSpire } - -func setEnableAPIFields(ctx context.Context, want string) context.Context { - featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{ - "enable-api-fields": want, - }) - cfg := &Config{ - Defaults: &Defaults{ - DefaultTimeoutMinutes: 60, - }, - FeatureFlags: featureFlags, - } - return ToContext(ctx, cfg) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go index 5b369909..9422ec5e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go @@ -82,6 +82,9 @@ const ( DurationPipelinerunTypeLastValue = "lastvalue" ) +// DefaultMetrics holds all the default configurations for the metrics. +var DefaultMetrics, _ = newMetricsFromMap(map[string]string{}) + // Metrics holds the configurations for the metrics // +k8s:deepcopy-gen=true type Metrics struct { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/spire_config.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/spire_config.go index 7ad507f2..4a293f04 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/spire_config.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/spire_config.go @@ -47,6 +47,9 @@ const ( SpireNodeAliasPrefixDefault = "/tekton-node/" ) +// DefaultSpire hols all the default configurations for the spire. 
+var DefaultSpire, _ = NewSpireConfigFromMap(map[string]string{}) + // NewSpireConfigFromMap creates a Config from the supplied map func NewSpireConfigFromMap(data map[string]string) (*sc.SpireConfig, error) { cfg := &sc.SpireConfig{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go index 9cb15bdf..76307196 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go @@ -32,6 +32,7 @@ type Config struct { FeatureFlags *FeatureFlags Metrics *Metrics SpireConfig *sc.SpireConfig + Events *Events } // FromContext extracts a Config from the provided context. @@ -49,16 +50,13 @@ func FromContextOrDefaults(ctx context.Context) *Config { if cfg := FromContext(ctx); cfg != nil { return cfg } - defaults, _ := NewDefaultsFromMap(map[string]string{}) - featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{}) - metrics, _ := newMetricsFromMap(map[string]string{}) - spireconfig, _ := NewSpireConfigFromMap(map[string]string{}) return &Config{ - Defaults: defaults, - FeatureFlags: featureFlags, - Metrics: metrics, - SpireConfig: spireconfig, + Defaults: DefaultConfig.DeepCopy(), + FeatureFlags: DefaultFeatureFlags.DeepCopy(), + Metrics: DefaultMetrics.DeepCopy(), + SpireConfig: DefaultSpire.DeepCopy(), + Events: DefaultEvents.DeepCopy(), } } @@ -85,6 +83,7 @@ func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value i GetFeatureFlagsConfigName(): NewFeatureFlagsFromConfigMap, GetMetricsConfigName(): NewMetricsFromConfigMap, GetSpireConfigName(): NewSpireConfigFromConfigMap, + GetEventsConfigName(): NewEventsFromConfigMap, }, onAfterStore..., ), @@ -102,20 +101,23 @@ func (s *Store) ToContext(ctx context.Context) context.Context { func (s *Store) Load() *Config { defaults := s.UntypedLoad(GetDefaultsConfigName()) if defaults == nil { - defaults, _ = NewDefaultsFromMap(map[string]string{}) + defaults = DefaultConfig.DeepCopy() } featureFlags := s.UntypedLoad(GetFeatureFlagsConfigName()) if featureFlags == nil { - featureFlags, _ = NewFeatureFlagsFromMap(map[string]string{}) + featureFlags = DefaultFeatureFlags.DeepCopy() } metrics := s.UntypedLoad(GetMetricsConfigName()) if metrics == nil { - metrics, _ = newMetricsFromMap(map[string]string{}) + metrics = DefaultMetrics.DeepCopy() } - spireconfig := s.UntypedLoad(GetSpireConfigName()) if spireconfig == nil { - spireconfig, _ = NewSpireConfigFromMap(map[string]string{}) + spireconfig = DefaultSpire.DeepCopy() + } + events := s.UntypedLoad(GetEventsConfigName()) + if events == nil { + events = DefaultEvents.DeepCopy() } return &Config{ @@ -123,5 +125,6 @@ func (s *Store) Load() *Config { FeatureFlags: featureFlags.(*FeatureFlags).DeepCopy(), Metrics: metrics.(*Metrics).DeepCopy(), SpireConfig: spireconfig.(*sc.SpireConfig).DeepCopy(), + Events: events.(*Events).DeepCopy(), } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go index 15d1070d..0d0bff08 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go @@ -56,6 +56,29 @@ func (in *Defaults) DeepCopy() *Defaults { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Events) DeepCopyInto(out *Events) { + *out = *in + if in.Formats != nil { + in, out := &in.Formats, &out.Formats + *out = make(EventFormats, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Events. +func (in *Events) DeepCopy() *Events { + if in == nil { + return nil + } + out := new(Events) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureFlags) DeepCopyInto(out *FeatureFlags) { *out = *in diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/storagetypes.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/constant.go similarity index 67% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/storagetypes.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/constant.go index edccac25..19604205 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/storagetypes.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/constant.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2023 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,9 +17,6 @@ limitations under the License. package pipeline const ( - // ArtifactStorageBucketType holds the name of the PipelineResource type for a bucket - ArtifactStorageBucketType = "bucket" - - // ArtifactStoragePVCType holds the name of the PipelineResource type for a pvc - ArtifactStoragePVCType = "pvc" + // TektonReservedAnnotationExpr is the expression we use to filter out reserved key in annotation + TektonReservedAnnotationExpr = "(chains.tekton.dev)/.*" ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go index 67fb8a6b..f51a0ac4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go @@ -300,29 +300,17 @@ func (m *Matrix) validateCombinationsCount(ctx context.Context) (errs *apis.Fiel return errs } -// validateParams validates the type of Parameter for Matrix.Params and Matrix.Include.Params -// Matrix.Params must be of type array. Matrix.Include.Params must be of type string. 
-// validateParams also validates Matrix.Params for a unique list of params +// validateUniqueParams validates Matrix.Params for a unique list of params // and a unique list of params in each Matrix.Include.Params specification -func (m *Matrix) validateParams() (errs *apis.FieldError) { +func (m *Matrix) validateUniqueParams() (errs *apis.FieldError) { if m != nil { if m.HasInclude() { for i, include := range m.Include { errs = errs.Also(include.Params.validateDuplicateParameters().ViaField(fmt.Sprintf("matrix.include[%d].params", i))) - for _, param := range include.Params { - if param.Value.Type != ParamTypeString { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type string only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.include.params", param.Name)) - } - } } } if m.HasParams() { errs = errs.Also(m.Params.validateDuplicateParameters().ViaField("matrix.params")) - for _, param := range m.Params { - if param.Value.Type != ParamTypeArray { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type array only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.params", param.Name)) - } - } } } return errs diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go index 5915648f..abb49a96 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go @@ -1875,7 +1875,7 @@ func schema_pkg_apis_pipeline_v1_PipelineTask(ref common.ReferenceCallback) comm }, "timeout": { SchemaProps: spec.SchemaProps{ - Description: "Time after which the TaskRun times out. Defaults to 1 hour. Specified TaskRun timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Description: "Time after which the TaskRun times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -3634,7 +3634,7 @@ func schema_pkg_apis_pipeline_v1_TaskRunSpec(ref common.ReferenceCallback) commo }, "timeout": { SchemaProps: spec.SchemaProps{ - Description: "Time after which one retry attempt times out. Defaults to 1 hour. Specified build timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Description: "Time after which one retry attempt times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go index 746cd3d4..b47e45f7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go @@ -20,7 +20,6 @@ import ( "context" "encoding/json" "fmt" - "regexp" "strings" "github.com/tektoncd/pipeline/pkg/substitution" @@ -29,12 +28,6 @@ import ( "knative.dev/pkg/apis" ) -// exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else -// i.e. 
`$(result.resultname)` is a match, but `foo $(result.resultname)` is not. -const exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$` - -var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat) - // ParamsPrefix is the prefix used in $(...) expressions referring to parameters const ParamsPrefix = "params" @@ -110,6 +103,51 @@ func (pp *ParamSpec) setDefaultsForProperties() { } } +// getNames returns all the names of the declared parameters +func (ps ParamSpecs) getNames() []string { + var names []string + for _, p := range ps { + names = append(names, p.Name) + } + return names +} + +// sortByType splits the input params into string params, array params, and object params, in that order +func (ps ParamSpecs) sortByType() (ParamSpecs, ParamSpecs, ParamSpecs) { + var stringParams, arrayParams, objectParams ParamSpecs + for _, p := range ps { + switch p.Type { + case ParamTypeArray: + arrayParams = append(arrayParams, p) + case ParamTypeObject: + objectParams = append(objectParams, p) + case ParamTypeString: + fallthrough + default: + stringParams = append(stringParams, p) + } + } + return stringParams, arrayParams, objectParams +} + +// validateNoDuplicateNames returns an error if any of the params have the same name +func (ps ParamSpecs) validateNoDuplicateNames() *apis.FieldError { + names := ps.getNames() + seen := sets.String{} + dups := sets.String{} + var errs *apis.FieldError + for _, n := range names { + if seen.Has(n) { + dups.Insert(n) + } + seen.Insert(n) + } + for n := range dups { + errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", n)) + } + return errs +} + // Param declares an ParamValues to use for the parameter called name. 
type Param struct { Name string `json:"name"` @@ -150,9 +188,9 @@ func (ps Params) extractParamMapArrVals() map[string][]string { // Params is a list of Param type Params []Param -// extractParamArrayLengths extract and return the lengths of all array params +// ExtractParamArrayLengths extract and return the lengths of all array params // Example of returned value: {"a-array-params": 2,"b-array-params": 2 } -func (ps Params) extractParamArrayLengths() map[string]int { +func (ps Params) ExtractParamArrayLengths() map[string]int { // Collect all array params arrayParamsLengths := make(map[string]int) @@ -178,9 +216,18 @@ func (ps Params) validateDuplicateParameters() (errs *apis.FieldError) { return errs } -// extractParamArrayLengths extract and return the lengths of all array params +// ReplaceVariables applies string, array and object replacements to variables in Params +func (ps Params) ReplaceVariables(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) Params { + params := ps.DeepCopy() + for i := range params { + params[i].Value.ApplyReplacements(stringReplacements, arrayReplacements, objectReplacements) + } + return params +} + +// ExtractDefaultParamArrayLengths extract and return the lengths of all array params // Example of returned value: {"a-array-params": 2,"b-array-params": 2 } -func (ps ParamSpecs) extractParamArrayLengths() map[string]int { +func (ps ParamSpecs) ExtractDefaultParamArrayLengths() map[string]int { // Collect all array params arrayParamsLengths := make(map[string]int) @@ -195,30 +242,6 @@ func (ps ParamSpecs) extractParamArrayLengths() map[string]int { return arrayParamsLengths } -// validateOutofBoundArrayParams validates if the array indexing params are out of bound -// example of arrayIndexingParams: ["$(params.a-array-param[1])", "$(params.b-array-param[2])"] -// example of arrayParamsLengths: {"a-array-params": 2,"b-array-params": 2 } -func validateOutofBoundArrayParams(arrayIndexingParams []string, arrayParamsLengths map[string]int) error { - outofBoundParams := sets.String{} - for _, val := range arrayIndexingParams { - indexString := substitution.ExtractIndexString(val) - idx, _ := substitution.ExtractIndex(indexString) - // this will extract the param name from reference - // e.g. $(params.a-array-param[1]) -> a-array-param - paramName, _, _ := substitution.ExtractVariablesFromString(substitution.TrimArrayIndex(val), "params") - - if paramLength, ok := arrayParamsLengths[paramName[0]]; ok { - if idx >= paramLength { - outofBoundParams.Insert(val) - } - } - } - if outofBoundParams.Len() > 0 { - return fmt.Errorf("non-existent param references:%v", outofBoundParams.List()) - } - return nil -} - // extractArrayIndexingParamRefs takes a string of the form `foo-$(params.array-param[1])-bar` and extracts the portions of the string that reference an element in an array param. // For example, for the string “foo-$(params.array-param[1])-bar-$(params.other-array-param[2])-$(params.string-param)`, // it would return ["$(params.array-param[1])", "$(params.other-array-param[2])"]. 
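// --- Editorial sketch, not part of the vendored patch ---
// A minimal, hypothetical example of the newly exported Params.ReplaceVariables
// helper shown above. The parameter name and replacement value are placeholders;
// the import path follows the vendored module layout in this diff.
package main

import (
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func main() {
	params := v1.Params{{
		Name:  "image-registry",
		Value: *v1.NewStructuredValues("$(params.registry)"),
	}}

	// ReplaceVariables deep-copies the params and applies the string, array,
	// and object replacements to each value.
	replaced := params.ReplaceVariables(
		map[string]string{"params.registry": "registry.example.com"},
		nil,
		nil,
	)
	fmt.Println(replaced[0].Value.StringVal) // expected: registry.example.com
}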
@@ -443,7 +466,7 @@ func (paramValues ParamValue) MarshalJSON() ([]byte, error) { func (paramValues *ParamValue) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { switch paramValues.Type { case ParamTypeArray: - var newArrayVal []string + newArrayVal := []string{} for _, v := range paramValues.ArrayVal { newArrayVal = append(newArrayVal, substitution.ApplyArrayReplacements(v, stringReplacements, arrayReplacements)...) } @@ -475,7 +498,7 @@ func (paramValues *ParamValue) applyOrCorrect(stringReplacements map[string]stri // trim the head "$(" and the tail ")" or "[*])" // i.e. get "params.name" from "$(params.name)" or "$(params.name[*])" - trimedStringVal := StripStarVarSubExpression(stringVal) + trimedStringVal := substitution.StripStarVarSubExpression(stringVal) // if the stringVal is a reference to a string param if _, ok := stringReplacements[trimedStringVal]; ok { @@ -497,11 +520,6 @@ func (paramValues *ParamValue) applyOrCorrect(stringReplacements map[string]stri } } -// StripStarVarSubExpression strips "$(target[*])"" to get "target" -func StripStarVarSubExpression(s string) string { - return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(s, "$("), ")"), "[*]") -} - // NewStructuredValues creates an ParamValues of type ParamTypeString or ParamTypeArray, based on // how many inputs are given (>1 input will create an array, not string). func NewStructuredValues(value string, values ...string) *ParamValue { @@ -573,23 +591,23 @@ func validateParamStringValue(param Param, prefix string, paramNames sets.String // validateStringVariable validates the normal string fields that can only accept references to string param or individual keys of object param func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { - errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs := substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars) errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) - return errs.Also(substitution.ValidateVariableProhibitedP(value, prefix, arrayVars)) + return errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(value, prefix, arrayVars)) } func validateArrayVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { - errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs := substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars) errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) - return errs.Also(substitution.ValidateVariableIsolatedP(value, prefix, arrayVars)) + return errs.Also(substitution.ValidateVariableReferenceIsIsolated(value, prefix, arrayVars)) } func validateObjectVariable(value, prefix string, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { objectNames := sets.NewString() for objectParamName, keys := range objectParamNameKeys { objectNames.Insert(objectParamName) - errs = errs.Also(substitution.ValidateVariableP(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...))) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...))) } - return errs.Also(substitution.ValidateEntireVariableProhibitedP(value, prefix, 
objectNames)) + return errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(value, prefix, objectNames)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go index d3389448..e0ce5ec6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go @@ -203,7 +203,6 @@ type PipelineTask struct { Workspaces []WorkspacePipelineTaskBinding `json:"workspaces,omitempty"` // Time after which the TaskRun times out. Defaults to 1 hour. - // Specified TaskRun timeout should be less than 24h. // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go index 0c60fae3..e1a7464c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go @@ -46,8 +46,18 @@ func (p *Pipeline) SupportedVerbs() []admissionregistrationv1.OperationType { // that any references resources exist, that is done at run time. func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { errs := validate.ObjectMetadata(p.GetObjectMeta()).ViaField("metadata") - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false) - return errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + // When a Pipeline is created directly, instead of declared inline in a PipelineRun, + // we do not support propagated parameters and workspaces. + // Validate that all params and workspaces it uses are declared. + errs = errs.Also(p.Spec.validatePipelineParameterUsage(ctx).ViaField("spec")) + errs = errs.Also(p.Spec.validatePipelineWorkspacesUsage().ViaField("spec")) + // Validate beta fields when a Pipeline is defined, but not as part of validating a Pipeline spec. + // This prevents validation from failing when a Pipeline is converted to a different API version. + // See https://github.com/tektoncd/pipeline/issues/6616 for more information. + // TODO(#6592): Decouple API versioning from feature versioning + errs = errs.Also(p.Spec.ValidateBetaFields(ctx)) + return errs } // Validate checks that taskNames in the Pipeline are valid and that the graph @@ -68,8 +78,6 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validateExecutionStatusVariables(ps.Tasks, ps.Finally)) // Validate the pipeline's workspaces. 
errs = errs.Also(validatePipelineWorkspacesDeclarations(ps.Workspaces)) - errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Tasks).ViaField("tasks")) - errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Finally).ViaField("finally")) // Validate the pipeline's results errs = errs.Also(validatePipelineResults(ps.Results, ps.Tasks, ps.Finally)) errs = errs.Also(validateTasksAndFinallySection(ps)) @@ -81,6 +89,60 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { return errs } +// ValidateBetaFields returns an error if the Pipeline spec uses beta features but does not +// have "enable-api-fields" set to "alpha" or "beta". +func (ps *PipelineSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + // Object parameters + for i, p := range ps.Params { + if p.Type == ParamTypeObject { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields).ViaFieldIndex("params", i)) + } + } + // Indexing into array parameters + arrayParamIndexingRefs := ps.GetIndexingReferencesToArrayParams() + if len(arrayParamIndexingRefs) != 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "indexing into array parameters", config.BetaAPIFields)) + } + // array and object results + for i, result := range ps.Results { + switch result.Type { + case ResultsTypeObject: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object results", config.BetaAPIFields).ViaFieldIndex("results", i)) + case ResultsTypeArray: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "array results", config.BetaAPIFields).ViaFieldIndex("results", i)) + case ResultsTypeString: + default: + } + } + for i, pt := range ps.Tasks { + errs = errs.Also(pt.validateBetaFields(ctx).ViaFieldIndex("tasks", i)) + } + for i, pt := range ps.Finally { + errs = errs.Also(pt.validateBetaFields(ctx).ViaFieldIndex("tasks", i)) + } + + return errs +} + +// validateBetaFields returns an error if the PipelineTask uses beta features but does not +// have "enable-api-fields" set to "alpha" or "beta". +func (pt *PipelineTask) validateBetaFields(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + if pt.TaskRef != nil { + // Resolvers + if pt.TaskRef.Resolver != "" { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "taskref.resolver", config.BetaAPIFields)) + } + if len(pt.TaskRef.Params) > 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "taskref.params", config.BetaAPIFields)) + } + } else if pt.TaskSpec != nil { + errs = errs.Also(pt.TaskSpec.ValidateBetaFields(ctx)) + } + return errs +} + // ValidatePipelineTasks ensures that pipeline tasks has unique label, pipeline tasks has specified one of // taskRef or taskSpec, and in case of a pipeline task with taskRef, it has a reference to a valid task (task name) func ValidatePipelineTasks(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) *apis.FieldError { @@ -107,6 +169,16 @@ func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, p return errs } +// validateUsageOfDeclaredPipelineTaskParameters validates that all parameters referenced in the pipeline Task are declared by the pipeline Task. 
+func (l PipelineTaskList) validateUsageOfDeclaredPipelineTaskParameters(ctx context.Context, path string) (errs *apis.FieldError) { + for i, t := range l { + if t.TaskSpec != nil { + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.TaskSpec.Steps, t.TaskSpec.Params).ViaFieldIndex(path, i)) + } + } + return errs +} + // ValidateName checks whether the PipelineTask's name is a valid DNS label func (pt PipelineTask) ValidateName() *apis.FieldError { if err := validation.IsDNS1123Label(pt.Name); len(err) > 0 { @@ -126,11 +198,21 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(pt.validateRefOrSpec()) errs = errs.Also(pt.validateEmbeddedOrType()) - + // taskKinds contains the kinds when the apiVersion is not set, they are not custom tasks, + // if apiVersion is set they are custom tasks. + taskKinds := map[TaskKind]bool{ + "": true, + NamespacedTaskKind: true, + ClusterTaskRefKind: true, + } // Pipeline task having taskRef/taskSpec with APIVersion is classified as custom task switch { + case pt.TaskRef != nil && !taskKinds[pt.TaskRef.Kind]: + errs = errs.Also(pt.validateCustomTask()) case pt.TaskRef != nil && pt.TaskRef.APIVersion != "": errs = errs.Also(pt.validateCustomTask()) + case pt.TaskSpec != nil && !taskKinds[TaskKind(pt.TaskSpec.Kind)]: + errs = errs.Also(pt.validateCustomTask()) case pt.TaskSpec != nil && pt.TaskSpec.APIVersion != "": errs = errs.Also(pt.validateCustomTask()) default: @@ -145,9 +227,9 @@ func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldErr // when the enable-api-fields feature gate is anything but "alpha". errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) errs = errs.Also(pt.Matrix.validateCombinationsCount(ctx)) + errs = errs.Also(pt.Matrix.validateUniqueParams()) } errs = errs.Also(pt.Matrix.validateParameterInOneOfMatrixOrParams(pt.Params)) - errs = errs.Also(pt.Matrix.validateParams()) return errs } @@ -239,29 +321,12 @@ func (pt PipelineTask) validateCustomTask() (errs *apis.FieldError) { // validateTask validates a pipeline task or a final task for taskRef and taskSpec func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) // Validate TaskSpec if it's present if pt.TaskSpec != nil { errs = errs.Also(pt.TaskSpec.Validate(ctx).ViaField("taskSpec")) } if pt.TaskRef != nil { - if pt.TaskRef.Name != "" { - // TaskRef name must be a valid k8s name - if errSlice := validation.IsQualifiedName(pt.TaskRef.Name); len(errSlice) != 0 { - errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name")) - } - } else if pt.TaskRef.Resolver == "" { - errs = errs.Also(apis.ErrInvalidValue("taskRef must specify name", "taskRef.name")) - } - if cfg.FeatureFlags.EnableAPIFields != config.BetaAPIFields && cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { - // fail if resolver or resource are present when enable-api-fields is false. 
- if pt.TaskRef.Resolver != "" { - errs = errs.Also(apis.ErrDisallowedFields("taskref.resolver")) - } - if len(pt.TaskRef.Params) > 0 { - errs = errs.Also(apis.ErrDisallowedFields("taskref.params")) - } - } + errs = errs.Also(pt.TaskRef.Validate(ctx).ViaField("taskRef")) } return errs } @@ -285,12 +350,43 @@ func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration) return errs } -// validatePipelineWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in -// the pipeline -func validatePipelineWorkspacesUsage(ctx context.Context, wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { - if !config.ValidateParameterVariablesAndWorkspaces(ctx) { - return nil +// validatePipelineParameterUsage validates that parameters referenced in the Pipeline are declared by the Pipeline +func (ps *PipelineSpec) validatePipelineParameterUsage(ctx context.Context) (errs *apis.FieldError) { + errs = errs.Also(PipelineTaskList(ps.Tasks).validateUsageOfDeclaredPipelineTaskParameters(ctx, "tasks")) + errs = errs.Also(PipelineTaskList(ps.Finally).validateUsageOfDeclaredPipelineTaskParameters(ctx, "finally")) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.Tasks, ps.Params).ViaField("tasks")) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.Finally, ps.Params).ViaField("finally")) + return errs +} + +// validatePipelineTaskParameterUsage validates that parameters referenced in the Pipeline Tasks are declared by the Pipeline +func validatePipelineTaskParameterUsage(tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) { + allParamNames := sets.NewString(params.getNames()...) + _, arrayParams, objectParams := params.sortByType() + arrayParamNames := sets.NewString(arrayParams.getNames()...) 
+ objectParameterNameKeys := map[string][]string{} + for _, p := range objectParams { + for k := range p.Properties { + objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k) + } + } + errs = errs.Also(validatePipelineParametersVariables(tasks, "params", allParamNames, arrayParamNames, objectParameterNameKeys)) + for i, task := range tasks { + errs = errs.Also(task.Params.validateDuplicateParameters().ViaFieldIndex("params", i)) } + return errs +} + +// validatePipelineWorkspacesUsage validates that workspaces referenced in the Pipeline are declared by the Pipeline +func (ps *PipelineSpec) validatePipelineWorkspacesUsage() (errs *apis.FieldError) { + errs = errs.Also(validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")) + errs = errs.Also(validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally")) + return errs +} + +// validatePipelineTasksWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in +// the pipeline +func validatePipelineTasksWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { workspaceNames := sets.NewString() for _, ws := range wss { workspaceNames.Insert(ws.Name) @@ -304,37 +400,16 @@ func validatePipelineWorkspacesUsage(ctx context.Context, wss []PipelineWorkspac // ValidatePipelineParameterVariables validates parameters with those specified by each pipeline task, // (1) it validates the type of parameter is either string or array (2) parameter default value matches -// with the type of that param (3) ensures that the referenced param variable is defined is part of the param declarations -func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { - parameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - objectParameterNameKeys := map[string][]string{} - +// with the type of that param +func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) { // validates all the types within a slice of ParamSpecs errs = errs.Also(ValidateParameterTypes(ctx, params).ViaField("params")) - - for _, p := range params { - if parameterNames.Has(p.Name) { - errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) - } - // Add parameter name to parameterNames, and to arrayParameterNames if type is array. 
- parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { - arrayParameterNames.Insert(p.Name) - } - - if p.Type == ParamTypeObject { - for k := range p.Properties { - objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k) - } - } - } - if config.ValidateParameterVariablesAndWorkspaces(ctx) { - errs = errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames, objectParameterNameKeys)) + errs = errs.Also(params.validateNoDuplicateNames()) + for i, task := range tasks { + errs = errs.Also(task.Params.validateDuplicateParameters().ViaField("params").ViaIndex(i)) } return errs } - func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for idx, task := range tasks { errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) @@ -493,7 +568,7 @@ func validateExecutionStatusVariablesExpressions(expressions []string, ptNames s } func validatePipelineContextVariablesInParamValues(paramValues []string, prefix string, contextNames sets.String) (errs *apis.FieldError) { for _, paramValue := range paramValues { - errs = errs.Also(substitution.ValidateVariableP(paramValue, prefix, contextNames).ViaField("value")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(paramValue, prefix, contextNames).ViaField("value")) } return errs } @@ -675,20 +750,11 @@ func validateResultsFromMatrixedPipelineTasksNotConsumed(tasks []PipelineTask, f return errs } -// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. -// error is returned when the array indexing reference is out of bound of the array param -// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. -func (ps *PipelineSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { - if !config.CheckAlphaOrBetaAPIFields(ctx) { - return nil - } - - // Collect all array params lengths - arrayParamsLengths := ps.Params.extractParamArrayLengths() - for k, v := range params.extractParamArrayLengths() { - arrayParamsLengths[k] = v - } - +// GetIndexingReferencesToArrayParams returns all strings referencing indices of PipelineRun array parameters +// from parameters, workspaces, and when expressions defined in the Pipeline's Tasks and Finally Tasks. +// For example, if a Task in the Pipeline has a parameter with a value "$(params.array-param-name[1])", +// this would be one of the strings returned. +func (ps *PipelineSpec) GetIndexingReferencesToArrayParams() sets.String { paramsRefs := []string{} for i := range ps.Tasks { paramsRefs = append(paramsRefs, ps.Tasks[i].Params.extractValues()...) @@ -703,22 +769,20 @@ func (ps *PipelineSpec) ValidateParamArrayIndex(ctx context.Context, params Para paramsRefs = append(paramsRefs, wes.Values...) } } - for i := range ps.Finally { paramsRefs = append(paramsRefs, ps.Finally[i].Params.extractValues()...) if ps.Finally[i].IsMatrixed() { paramsRefs = append(paramsRefs, ps.Finally[i].Matrix.Params.extractValues()...) } for _, wes := range ps.Finally[i].When { + paramsRefs = append(paramsRefs, wes.Input) paramsRefs = append(paramsRefs, wes.Values...) 
} } - // extract all array indexing references, for example []{"$(params.array-params[1])"} arrayIndexParamRefs := []string{} for _, p := range paramsRefs { arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) } - - return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) + return sets.NewString(arrayIndexParamRefs...) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go index e53efe5e..19f00f70 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go @@ -18,19 +18,32 @@ package v1 import ( "context" + "regexp" "time" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" + "knative.dev/pkg/kmap" ) -var _ apis.Defaultable = (*PipelineRun)(nil) +var ( + _ apis.Defaultable = (*PipelineRun)(nil) + filterReservedAnnotationRegexp = regexp.MustCompile(pipeline.TektonReservedAnnotationExpr) +) // SetDefaults implements apis.Defaultable func (pr *PipelineRun) SetDefaults(ctx context.Context) { pr.Spec.SetDefaults(ctx) + + // Silently filtering out Tekton Reserved annotations at creation + if apis.IsInCreate(ctx) { + pr.ObjectMeta.Annotations = kmap.Filter(pr.ObjectMeta.Annotations, func(s string) bool { + return filterReservedAnnotationRegexp.MatchString(s) + }) + } } // SetDefaults implements apis.Defaultable diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go index cc8ad992..88120472 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go @@ -18,6 +18,7 @@ package v1 import ( "context" + "fmt" "time" "github.com/tektoncd/pipeline/pkg/apis/config" @@ -153,6 +154,22 @@ func (pr *PipelineRun) GetNamespacedName() types.NamespacedName { return types.NamespacedName{Namespace: pr.Namespace, Name: pr.Name} } +// IsTimeoutConditionSet returns true when the pipelinerun has the pipelinerun timed out reason +func (pr *PipelineRun) IsTimeoutConditionSet() bool { + condition := pr.Status.GetCondition(apis.ConditionSucceeded) + return condition.IsFalse() && condition.Reason == PipelineRunReasonTimedOut.String() +} + +// SetTimeoutCondition sets the status of the PipelineRun to timed out. 
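+// The Succeeded condition is set to False with reason PipelineRunReasonTimedOut, and the message records the PipelineRun name and the effective timeout.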
+func (pr *PipelineRun) SetTimeoutCondition(ctx context.Context) { + pr.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: PipelineRunReasonTimedOut.String(), + Message: fmt.Sprintf("PipelineRun %q failed to finish within %q", pr.Name, pr.PipelineTimeout(ctx).String()), + }) +} + // HasTimedOut returns true if a pipelinerun has exceeded its spec.Timeout based on its status.Timeout func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool { timeout := pr.PipelineTimeout(ctx) @@ -170,6 +187,19 @@ func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bo return false } +// HasTimedOutForALongTime returns true if a pipelinerun has exceeded its spec.Timeout based on its status.StartTime +// by a large margin +func (pr *PipelineRun) HasTimedOutForALongTime(ctx context.Context, c clock.PassiveClock) bool { + if !pr.HasTimedOut(ctx, c) { + return false + } + timeout := pr.PipelineTimeout(ctx) + startTime := pr.Status.StartTime + runtime := c.Since(startTime.Time) + // We are arbitrarily defining a large margin as double the spec.timeout + return runtime >= 2*timeout +} + // HaveTasksTimedOut returns true if a pipelinerun has exceeded its spec.Timeouts.Tasks func (pr *PipelineRun) HaveTasksTimedOut(ctx context.Context, c clock.PassiveClock) bool { timeout := pr.TasksTimeout() diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go index d5e8361f..a072eef2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go @@ -66,8 +66,12 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) // Validate PipelineSpec if it's present if ps.PipelineSpec != nil { - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true) errs = errs.Also(ps.PipelineSpec.Validate(ctx).ViaField("pipelineSpec")) + // Validate beta fields separately for inline Pipeline definitions. + // This prevents validation from failing in the reconciler when a Pipeline is converted to a different API version. + // See https://github.com/tektoncd/pipeline/issues/6616 for more information.
+ // TODO(#6592): Decouple API versioning from feature versioning + errs = errs.Also(ps.PipelineSpec.ValidateBetaFields(ctx).ViaField("pipelineSpec")) } // Validate PipelineRun parameters @@ -148,66 +152,62 @@ func (ps *PipelineRunSpec) validateInlineParameters(ctx context.Context) (errs * if ps.PipelineSpec == nil { return errs } - var paramSpec []ParamSpec + paramSpecForValidation := make(map[string]ParamSpec) for _, p := range ps.Params { - pSpec := ParamSpec{ - Name: p.Name, - Default: &p.Value, + paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation) + } + for _, p := range ps.PipelineSpec.Params { + var err *apis.FieldError + paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation) + if err != nil { + errs = errs.Also(err) } - paramSpec = append(paramSpec, pSpec) } - paramSpec = appendParamSpec(paramSpec, ps.PipelineSpec.Params) for _, pt := range ps.PipelineSpec.Tasks { - paramSpec = appendParam(paramSpec, pt.Params) + paramSpecForValidation = appendPipelineTaskParams(paramSpecForValidation, pt.Params) if pt.TaskSpec != nil && pt.TaskSpec.Params != nil { - paramSpec = appendParamSpec(paramSpec, pt.TaskSpec.Params) + for _, p := range pt.TaskSpec.Params { + var err *apis.FieldError + paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation) + if err != nil { + errs = errs.Also(err) + } + } } } + var paramSpec []ParamSpec + for _, v := range paramSpecForValidation { + paramSpec = append(paramSpec, v) + } if ps.PipelineSpec != nil && ps.PipelineSpec.Tasks != nil { for _, pt := range ps.PipelineSpec.Tasks { if pt.TaskSpec != nil && pt.TaskSpec.Steps != nil { - errs = errs.Also(ValidateParameterVariables( - config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), pt.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) + errs = errs.Also(ValidateParameterVariables(ctx, pt.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, pt.TaskSpec.Steps, paramSpec)) } } + errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.PipelineSpec.Tasks, paramSpec)) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.PipelineSpec.Tasks, paramSpec)) } return errs } -func appendParamSpec(paramSpec []ParamSpec, params []ParamSpec) []ParamSpec { +func appendPipelineTaskParams(paramSpecForValidation map[string]ParamSpec, params Params) map[string]ParamSpec { for _, p := range params { - skip := false - for _, ps := range paramSpec { - if ps.Name == p.Name { - skip = true - break - } - } - if !skip { - paramSpec = append(paramSpec, p) - } - } - return paramSpec -} - -func appendParam(paramSpec []ParamSpec, params Params) []ParamSpec { - for _, p := range params { - skip := false - for _, ps := range paramSpec { - if ps.Name == p.Name { - skip = true - break - } - } - if !skip { - pSpec := ParamSpec{ - Name: p.Name, - Default: &p.Value, + if pSpec, ok := paramSpecForValidation[p.Name]; ok { + if p.Value.ObjectVal != nil { + for k, v := range p.Value.ObjectVal { + pSpec.Default.ObjectVal[k] = v + pSpec.Properties[k] = PropertySpec{Type: ParamTypeString} + } } - paramSpec = append(paramSpec, pSpec) + paramSpecForValidation[p.Name] = pSpec + } else { + paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation) } } - return paramSpec + return paramSpecForValidation } func validateSpecStatus(status PipelineRunSpecStatus) *apis.FieldError { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go 
b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go index 1fd9ddd6..0d19c2ab 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go @@ -16,18 +16,10 @@ package v1 import ( "context" "fmt" - "regexp" - "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/version" "knative.dev/pkg/apis" ) -// ResultNameFormat Constant used to define the regex Result.Name should follow -const ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$` - -var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat) - // Validate implements apis.Validatable func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { if !resultNameFormatRegex.MatchString(tr.Name) { @@ -35,15 +27,11 @@ func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { } switch { - // Object results is beta feature - check if the feature flag is set to "beta" or "alpha" case tr.Type == ResultsTypeObject: errs := validateObjectResult(tr) - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.BetaAPIFields)) return errs - // Array results is a beta feature - check if the feature flag is set to "beta" or "alpha" case tr.Type == ResultsTypeArray: - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.BetaAPIFields)) - return errs + return nil // Resources created before the result. Type was introduced may not have Type set // and should be considered valid case tr.Type == "": diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go index 1dcd06e6..5a7610e0 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go @@ -39,19 +39,26 @@ const ( objectResultExpressionFormat = "tasks..results.." // ResultTaskPart Constant used to define the "tasks" part of a pipeline result reference ResultTaskPart = "tasks" - // ResultFinallyPart Constant used to define the "finally" part of a task result reference + // ResultFinallyPart Constant used to define the "finally" part of a pipeline result reference ResultFinallyPart = "finally" // ResultResultPart Constant used to define the "results" part of a pipeline result reference ResultResultPart = "results" // TODO(#2462) use one regex across all substitutions // variableSubstitutionFormat matches format like $result.resultname, $result.resultname[int] and $result.resultname[*] variableSubstitutionFormat = `\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)` + // exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else + // i.e. `$(result.resultname)` is a match, but `foo $(result.resultname)` is not. 
+ exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$` // arrayIndexing will match all `[int]` and `[*]` for parseExpression arrayIndexing = `\[([0-9])*\*?\]` + // ResultNameFormat Constant used to define the regex Result.Name should follow + ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$` ) // VariableSubstitutionRegex is a regex to find all result matching substitutions var VariableSubstitutionRegex = regexp.MustCompile(variableSubstitutionFormat) +var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat) +var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat) // arrayIndexingRegex is used to match `[int]` and `[*]` var arrayIndexingRegex = regexp.MustCompile(arrayIndexing) @@ -180,7 +187,7 @@ func parseExpression(substitutionExpression string) (string, string, int, string return subExpressions[1], subExpressions[3], 0, subExpressions[4], nil } } - return "", "", 0, "", fmt.Errorf("Must be one of the form 1). %q; 2). %q", resultExpressionFormat, objectResultExpressionFormat) + return "", "", 0, "", fmt.Errorf("must be one of the form 1). %q; 2). %q", resultExpressionFormat, objectResultExpressionFormat) } // ParseResultName parse the input string to extract resultName and result index. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json index f7b05c14..5a71f5f5 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json @@ -917,7 +917,7 @@ "$ref": "#/definitions/v1.EmbeddedTask" }, "timeout": { - "description": "Time after which the TaskRun times out. Defaults to 1 hour. Specified TaskRun timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "description": "Time after which the TaskRun times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", "$ref": "#/definitions/v1.Duration" }, "when": { @@ -1905,7 +1905,7 @@ "$ref": "#/definitions/v1.TaskSpec" }, "timeout": { - "description": "Time after which one retry attempt times out. Defaults to 1 hour. Specified build timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "description": "Time after which one retry attempt times out. Defaults to 1 hour. 
Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", "$ref": "#/definitions/v1.Duration" }, "workspaces": { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go index 5848053b..8a051618 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go @@ -62,8 +62,16 @@ var objectVariableNameFormatRegex = regexp.MustCompile(objectVariableNameFormat) // Validate implements apis.Validatable func (t *Task) Validate(ctx context.Context) *apis.FieldError { errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false) - return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + // When a Task is created directly, instead of declared inline in a TaskRun or PipelineRun, + // we do not support propagated parameters. Validate that all params it uses are declared. + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.Spec.Steps, t.Spec.Params).ViaField("spec")) + // Validate beta fields when a Task is defined, but not as part of validating a Task spec. + // This prevents validation from failing when a Task is converted to a different API version. + // See https://github.com/tektoncd/pipeline/issues/6616 for more information. + // TODO(#6592): Decouple API versioning from feature versioning + errs = errs.Also(t.Spec.ValidateBetaFields(ctx)) + return errs } // Validate implements apis.Validatable @@ -72,14 +80,6 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(apis.ErrMissingField("steps")) } - if config.IsSubstituted(ctx) { - // Validate the task's workspaces only. - errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) - errs = errs.Also(validateWorkspaceUsages(ctx, ts)) - - return errs - } - errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes")) errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) errs = errs.Also(validateWorkspaceUsages(ctx, ts)) @@ -102,6 +102,57 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { return errs } +// ValidateBetaFields returns an error if the Task spec uses beta features but does not +// have "enable-api-fields" set to "alpha" or "beta". 
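+// The beta features checked here are object parameters, indexing into array parameters, and array or object results.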
+func (ts *TaskSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + // Object parameters + for i, p := range ts.Params { + if p.Type == ParamTypeObject { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields).ViaFieldIndex("params", i)) + } + } + // Indexing into array parameters + arrayIndexParamRefs := ts.GetIndexingReferencesToArrayParams() + if len(arrayIndexParamRefs) != 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "indexing into array parameters", config.BetaAPIFields)) + } + // Array and object results + for i, result := range ts.Results { + switch result.Type { + case ResultsTypeObject: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object results", config.BetaAPIFields).ViaFieldIndex("results", i)) + case ResultsTypeArray: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "array results", config.BetaAPIFields).ViaFieldIndex("results", i)) + case ResultsTypeString: + default: + } + } + return errs +} + +// ValidateUsageOfDeclaredParameters validates that all parameters referenced in the Task are declared by the Task. +func ValidateUsageOfDeclaredParameters(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError { + var errs *apis.FieldError + _, _, objectParams := params.sortByType() + allParameterNames := sets.NewString(params.getNames()...) + errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) + errs = errs.Also(validateObjectUsage(ctx, steps, objectParams)) + errs = errs.Also(validateObjectParamsHaveProperties(ctx, params)) + return errs +} + +// validateObjectParamsHaveProperties returns an error if any declared object params are missing properties +func validateObjectParamsHaveProperties(ctx context.Context, params ParamSpecs) *apis.FieldError { + var errs *apis.FieldError + for _, p := range params { + if p.Type == ParamTypeObject && p.Properties == nil { + errs = errs.Also(apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name))) + } + } + return errs +} + func validateSidecarNames(sidecars []Sidecar) (errs *apis.FieldError) { for _, sc := range sidecars { if sc.Name == pipeline.ReservedResultsSidecarName { @@ -157,8 +208,8 @@ func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, // validateWorkspaceUsages checks that all WorkspaceUsage objects in Steps // refer to workspaces that are defined in the Task. // -// This is an alpha feature and will fail validation if it's used by a step -// or sidecar when the enable-api-fields feature gate is anything but "alpha". +// This is a beta feature and will fail validation if it's used by a step +// or sidecar when the enable-api-fields feature gate is anything but "beta". 
func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.FieldError) { workspaces := ts.Workspaces steps := ts.Steps @@ -171,7 +222,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for stepIdx, step := range steps { if len(step.Workspaces) != 0 { - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.BetaAPIFields).ViaIndex(stepIdx).ViaField("steps")) } for workspaceIdx, w := range step.Workspaces { if !wsNames.Has(w.Name) { @@ -182,7 +233,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for sidecarIdx, sidecar := range sidecars { if len(sidecar.Workspaces) != 0 { - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.BetaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) } for workspaceIdx, w := range sidecar.Workspaces { if !wsNames.Has(w.Name) { @@ -294,11 +345,6 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi // ValidateParameterTypes validates all the types within a slice of ParamSpecs func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) { for _, p := range params { - if p.Type == ParamTypeObject { - // Object type parameter is a beta feature and will fail validation if it's used in a task spec - // when the enable-api-fields feature gate is not "alpha" or "beta". - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) - } errs = errs.Also(p.ValidateType(ctx)) } return errs @@ -337,14 +383,6 @@ func (p ParamSpec) ValidateType(ctx context.Context) *apis.FieldError { // definition of `properties` section and the type of a PropertySpec is allowed. // (Currently, only string is allowed) func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError { - if p.Type == ParamTypeObject && p.Properties == nil { - // If this we are not skipping validation checks due to propagated params - // then properties field is required. 
- if config.ValidateParameterVariablesAndWorkspaces(ctx) { - return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) - } - } - invalidKeys := []string{} for key, propertySpec := range p.Properties { if propertySpec.Type != ParamTypeString { @@ -363,38 +401,17 @@ func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError { } // ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps -func ValidateParameterVariables(ctx context.Context, steps []Step, params []ParamSpec) *apis.FieldError { - allParameterNames := sets.NewString() - stringParameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - objectParamSpecs := []ParamSpec{} +func ValidateParameterVariables(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError { var errs *apis.FieldError - for _, p := range params { - // validate no duplicate names - if allParameterNames.Has(p.Name) { - errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) - } - allParameterNames.Insert(p.Name) - - switch p.Type { - case ParamTypeArray: - arrayParameterNames.Insert(p.Name) - case ParamTypeObject: - objectParamSpecs = append(objectParamSpecs, p) - case ParamTypeString: - fallthrough - default: - stringParameterNames.Insert(p.Name) - } - } - errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) - if config.ValidateParameterVariablesAndWorkspaces(ctx) { - errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) - errs = errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) - } + errs = errs.Also(params.validateNoDuplicateNames()) + stringParams, arrayParams, objectParams := params.sortByType() + stringParameterNames := sets.NewString(stringParams.getNames()...) + arrayParameterNames := sets.NewString(arrayParams.getNames()...) + errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParams)) return errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) } +// validateTaskContextVariables returns an error if any Steps reference context variables that don't exist. func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError { taskRunContextNames := sets.NewString().Insert( "name", @@ -416,7 +433,7 @@ func validateTaskResultsVariables(ctx context.Context, steps []Step, results []T resultsNames.Insert(r.Name) } for idx, step := range steps { - errs = errs.Also(validateTaskVariable(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx)) } return errs } @@ -441,8 +458,7 @@ func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) return errs.Also(validateObjectUsageAsWhole(steps, "params", objectParameterNames)) } -// validateObjectUsageAsWhole makes sure the object params are not used as whole when providing values for strings -// i.e. 
param.objectParam, param.objectParam[*] +// validateObjectUsageAsWhole returns an error if the Steps contain references to the entire input object params in fields where these references are prohibited func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { for idx, step := range steps { errs = errs.Also(validateStepObjectUsageAsWhole(step, prefix, vars)).ViaFieldIndex("steps", idx) @@ -450,59 +466,62 @@ func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) ( return errs } +// validateStepObjectUsageAsWhole returns an error if the Step contains references to the entire input object params in fields where these references are prohibited func validateStepObjectUsageAsWhole(step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskNoObjectReferenced(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskNoObjectReferenced(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskNoObjectReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskNoObjectReferenced(step.Script, prefix, vars).ViaField("script")) + errs := substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Script, prefix, vars).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskNoObjectReferenced(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(cmd, prefix, vars).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskNoObjectReferenced(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(arg, prefix, vars).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskNoObjectReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskNoObjectReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoObjectReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoObjectReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) } return errs } -func validateArrayUsage(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { +// validateArrayUsage returns an error if the Steps contain 
references to the input array params in fields where these references are prohibited +func validateArrayUsage(steps []Step, prefix string, arrayParamNames sets.String) (errs *apis.FieldError) { for idx, step := range steps { - errs = errs.Also(validateStepArrayUsage(step, prefix, vars)).ViaFieldIndex("steps", idx) + errs = errs.Also(validateStepArrayUsage(step, prefix, arrayParamNames)).ViaFieldIndex("steps", idx) } return errs } -func validateStepArrayUsage(step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskNoArrayReferenced(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskNoArrayReferenced(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskNoArrayReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskNoArrayReferenced(step.Script, prefix, vars).ViaField("script")) +// validateStepArrayUsage returns an error if the Step contains references to the input array params in fields where these references are prohibited +func validateStepArrayUsage(step Step, prefix string, arrayParamNames sets.String) *apis.FieldError { + errs := substitution.ValidateNoReferencesToProhibitedVariables(step.Name, prefix, arrayParamNames).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.Image, prefix, arrayParamNames).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.WorkingDir, prefix, arrayParamNames).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.Script, prefix, arrayParamNames).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskArraysIsolated(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(cmd, prefix, arrayParamNames).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskArraysIsolated(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(arg, prefix, arrayParamNames).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskNoArrayReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(env.Value, prefix, arrayParamNames).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskNoArrayReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoArrayReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoArrayReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.Name, prefix, arrayParamNames).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.MountPath, prefix, arrayParamNames).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.SubPath, prefix, arrayParamNames).ViaField("subPath").ViaFieldIndex("volumeMount", i)) } return errs } +// validateVariables returns an error if the Steps contain references to any unknown variables func validateVariables(ctx context.Context, steps []Step, prefix string, vars 
sets.String) (errs *apis.FieldError) { - // We've checked param name format. Now, we want to check if param names are referenced correctly in each step for idx, step := range steps { errs = errs.Also(validateStepVariables(ctx, step, prefix, vars).ViaFieldIndex("steps", idx)) } @@ -558,68 +577,41 @@ func validateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSp return errs } +// validateStepVariables returns an error if the Step contains references to any unknown variables func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskVariable(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskVariable(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskVariable(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskVariable(step.Script, prefix, vars).ViaField("script")) + errs := substitution.ValidateNoReferencesToUnknownVariables(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, prefix, vars).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskVariable(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(cmd, prefix, vars).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskVariable(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(arg, prefix, vars).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskVariable(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskVariable(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskVariable(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskVariable(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) } - errs = errs.Also(validateTaskVariable(string(step.OnError), prefix, vars).ViaField("onError")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(string(step.OnError), prefix, vars).ViaField("onError")) return errs } -func validateTaskVariable(value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariableP(value, prefix, vars) -} - -func validateTaskNoObjectReferenced(value, prefix string, objectNames sets.String) *apis.FieldError { - return substitution.ValidateEntireVariableProhibitedP(value, prefix, 
objectNames) -} - -func validateTaskNoArrayReferenced(value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableProhibitedP(value, prefix, arrayNames) -} - -func validateTaskArraysIsolated(value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableIsolatedP(value, prefix, arrayNames) -} - // isParamRefs attempts to check if a specified string looks like it contains any parameter reference // This is useful to make sure the specified value looks like a Parameter Reference before performing any strict validation func isParamRefs(s string) bool { return strings.HasPrefix(s, "$("+ParamsPrefix) } -// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. -// error is returned when the array indexing reference is out of bound of the array param -// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. -// - `trParams` are params from taskrun. -// - `taskSpec` contains params declarations. -func (ts *TaskSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { - cfg := config.FromContextOrDefaults(ctx) - if cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { - return nil - } - - // Collect all array params lengths - arrayParamsLengths := ts.Params.extractParamArrayLengths() - for k, v := range params.extractParamArrayLengths() { - arrayParamsLengths[k] = v - } - +// GetIndexingReferencesToArrayParams returns all strings referencing indices of TaskRun array parameters +// from parameters, workspaces, and when expressions defined in the Task. +// For example, if a Task has a parameter with a value "$(params.array-param-name[1])", +// this would be one of the strings returned. +func (ts *TaskSpec) GetIndexingReferencesToArrayParams() sets.String { // collect all the possible places to use param references paramsRefs := []string{} paramsRefs = append(paramsRefs, extractParamRefsFromSteps(ts.Steps)...) @@ -629,12 +621,10 @@ func (ts *TaskSpec) ValidateParamArrayIndex(ctx context.Context, params Params) paramsRefs = append(paramsRefs, v.MountPath) } paramsRefs = append(paramsRefs, extractParamRefsFromSidecars(ts.Sidecars)...) - // extract all array indexing references, for example []{"$(params.array-params[1])"} arrayIndexParamRefs := []string{} for _, p := range paramsRefs { arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) } - - return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) + return sets.NewString(arrayIndexParamRefs...) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go index 2bb395da..f7558333 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go @@ -42,6 +42,9 @@ type TaskKind string const ( // NamespacedTaskKind indicates that the task type has a namespaced scope. NamespacedTaskKind TaskKind = "Task" + // ClusterTaskRefKind is the task type for a reference to a task with cluster scope. + // ClusterTasks are not supported in v1, but v1 types may reference ClusterTasks. 
+ ClusterTaskRefKind TaskKind = "ClusterTask" ) // IsCustomTask checks whether the reference is to a Custom Task diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go index 02dca530..104af5d0 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go @@ -18,9 +18,11 @@ package v1 import ( "context" + "strings" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/version" + "k8s.io/apimachinery/pkg/util/validation" "knative.dev/pkg/apis" ) @@ -31,7 +33,8 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { return } - if ref.Resolver != "" || ref.Params != nil { + switch { + case ref.Resolver != "" || ref.Params != nil: if ref.Resolver != "" { errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver")) if ref.Name != "" { @@ -48,8 +51,13 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { } errs = errs.Also(ValidateParameters(ctx, ref.Params)) } - } else if ref.Name == "" { + case ref.Name != "": + // TaskRef name must be a valid k8s name + if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name")) + } + default: errs = errs.Also(apis.ErrMissingField("name")) } - return + return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go index 9f34e6b5..79fa50f3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go @@ -24,6 +24,7 @@ import ( pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" + "knative.dev/pkg/kmap" ) var _ apis.Defaultable = (*TaskRun)(nil) @@ -36,6 +37,13 @@ func (tr *TaskRun) SetDefaults(ctx context.Context) { ctx = apis.WithinParent(ctx, tr.ObjectMeta) tr.Spec.SetDefaults(ctx) + // Silently filtering out Tekton Reserved annotations at creation + if apis.IsInCreate(ctx) { + tr.ObjectMeta.Annotations = kmap.Filter(tr.ObjectMeta.Annotations, func(s string) bool { + return filterReservedAnnotationRegexp.MatchString(s) + }) + } + // If the TaskRun doesn't have a managed-by label, apply the default // specified in the config. cfg := config.FromContextOrDefaults(ctx) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go index 7375c01f..30095d53 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go @@ -55,7 +55,6 @@ type TaskRunSpec struct { // +optional Retries int `json:"retries,omitempty"` // Time after which one retry attempt times out. Defaults to 1 hour. - // Specified build timeout should be less than 24h. 
// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -381,11 +380,16 @@ func (tr *TaskRun) HasStarted() bool { return tr.Status.StartTime != nil && !tr.Status.StartTime.IsZero() } -// IsSuccessful returns true if the TaskRun's status indicates that it is done. +// IsSuccessful returns true if the TaskRun's status indicates that it has succeeded. func (tr *TaskRun) IsSuccessful() bool { return tr != nil && tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() } +// IsFailure returns true if the TaskRun's status indicates that it has failed. +func (tr *TaskRun) IsFailure() bool { + return tr != nil && tr.Status.GetCondition(apis.ConditionSucceeded).IsFalse() +} + // IsCancelled returns true if the TaskRun's spec status is set to Cancelled state func (tr *TaskRun) IsCancelled() bool { return tr.Spec.Status == TaskRunSpecStatusCancelled diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go index 055094bb..c818a576 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go @@ -62,9 +62,12 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { } // Validate TaskSpec if it's present. if ts.TaskSpec != nil { - // skip validation of parameter and workspaces variables since we validate them via taskrunspec below. - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true) errs = errs.Also(ts.TaskSpec.Validate(ctx).ViaField("taskSpec")) + // Validate beta fields separately for inline Task definitions. + // This prevents validation from failing in the reconciler when a Task is converted to a different API version. + // See https://github.com/tektoncd/pipeline/issues/6616 for more information. + // TODO(#6592): Decouple API versioning from feature versioning + errs = errs.Also(ts.TaskSpec.ValidateBetaFields(ctx).ViaField("taskSpec")) } errs = errs.Also(ValidateParameters(ctx, ts.Params).ViaField("params")) @@ -141,7 +144,8 @@ func (ts *TaskRunSpec) validateInlineParameters(ctx context.Context) (errs *apis } if ts.TaskSpec != nil && ts.TaskSpec.Steps != nil { errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) - errs = errs.Also(ValidateParameterVariables(config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), ts.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateParameterVariables(ctx, ts.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, ts.TaskSpec.Steps, paramSpec)) } return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_types.go index 7e0ef914..58af7273 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_types.go @@ -102,9 +102,9 @@ func (wes WhenExpressions) AllowsExecution() bool { return true } -// ReplaceWhenExpressionsVariables interpolates variables, such as Parameters and Results, in +// ReplaceVariables interpolates variables, such as Parameters and Results, in // the Input and Values. 
-func (wes WhenExpressions) ReplaceWhenExpressionsVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions { +func (wes WhenExpressions) ReplaceVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions { replaced := wes for i := range wes { replaced[i] = wes[i].applyReplacements(replacements, arrayReplacements) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go index 4be7c585..57c60467 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go @@ -19,8 +19,6 @@ package v1 import ( "context" - "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/version" "k8s.io/apimachinery/pkg/api/equality" "knative.dev/pkg/apis" ) @@ -68,25 +66,15 @@ func (b *WorkspaceBinding) Validate(ctx context.Context) (errs *apis.FieldError) return apis.ErrMissingField("secret.secretName") } - // The projected workspace is only supported when the beta feature gate is enabled. // For a Projected volume to work, you must provide at least one source. if b.Projected != nil && len(b.Projected.Sources) == 0 { - errs := version.ValidateEnabledAPIFields(ctx, "projected workspace type", config.BetaAPIFields).ViaField("workspaces") - if errs != nil { - return errs - } if len(b.Projected.Sources) == 0 { return apis.ErrMissingField("projected.sources") } } - // The csi workspace is only supported when the beta feature gate is enabled. // For a CSI to work, you must provide and have installed the driver to use. if b.CSI != nil { - errs := version.ValidateEnabledAPIFields(ctx, "csi workspace type", config.BetaAPIFields).ViaField("workspaces") - if errs != nil { - return errs - } if b.CSI.Driver == "" { return apis.ErrMissingField("csi.driver") } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go index 342f3717..5dfa6278 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go @@ -19,7 +19,6 @@ package v1beta1 import ( "context" - "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" "knative.dev/pkg/apis" ) @@ -32,6 +31,8 @@ func (t *ClusterTask) Validate(ctx context.Context) *apis.FieldError { return nil } errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false) - return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + // We do not support propagated parameters in ClusterTasks. + // Validate that all params the ClusterTask uses are declared. 
+ return errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.Spec.Steps, t.Spec.Params)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go index 816e4e99..5bf1365f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go @@ -47,10 +47,6 @@ func (s Step) convertTo(ctx context.Context, sink *v1.Step) { sink.OnError = (v1.OnErrorType)(s.OnError) sink.StdoutConfig = (*v1.StepOutputConfig)(s.StdoutConfig) sink.StderrConfig = (*v1.StepOutputConfig)(s.StderrConfig) - - // TODO(#4546): Handle deprecated fields - // Ports, LivenessProbe, ReadinessProbe, StartupProbe, Lifecycle, TerminationMessagePath - // TerminationMessagePolicy, Stdin, StdinOnce, TTY } func (s *Step) convertFrom(ctx context.Context, source v1.Step) { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go index 23327003..e74f4834 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go @@ -223,11 +223,16 @@ func (r *CustomRun) HasStarted() bool { return r.Status.StartTime != nil && !r.Status.StartTime.IsZero() } -// IsSuccessful returns true if the CustomRun's status indicates that it is done. +// IsSuccessful returns true if the CustomRun's status indicates that it has succeeded. func (r *CustomRun) IsSuccessful() bool { return r != nil && r.Status.GetCondition(apis.ConditionSucceeded).IsTrue() } +// IsFailure returns true if the CustomRun's status indicates that it has failed. +func (r *CustomRun) IsFailure() bool { + return r != nil && r.Status.GetCondition(apis.ConditionSucceeded).IsFalse() +} + // GetCustomRunKey return the customrun's key for timeout handler map func (r *CustomRun) GetCustomRunKey() string { // The address of the pointer is a threadsafe unique identifier for the customrun diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go index f1c86d4e..19042e11 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go @@ -300,29 +300,17 @@ func (m *Matrix) validateCombinationsCount(ctx context.Context) (errs *apis.Fiel return errs } -// validateParams validates the type of Parameter for Matrix.Params and Matrix.Include.Params -// Matrix.Params must be of type array. Matrix.Include.Params must be of type string. 
-// validateParams also validates Matrix.Params for a unique list of params +// validateUniqueParams validates Matrix.Params for a unique list of params // and a unique list of params in each Matrix.Include.Params specification -func (m *Matrix) validateParams() (errs *apis.FieldError) { +func (m *Matrix) validateUniqueParams() (errs *apis.FieldError) { if m != nil { if m.HasInclude() { for i, include := range m.Include { errs = errs.Also(include.Params.validateDuplicateParameters().ViaField(fmt.Sprintf("matrix.include[%d].params", i))) - for _, param := range include.Params { - if param.Value.Type != ParamTypeString { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type string only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.include.params", param.Name)) - } - } } } if m.HasParams() { errs = errs.Also(m.Params.validateDuplicateParameters().ViaField("matrix.params")) - for _, param := range m.Params { - if param.Value.Type != ParamTypeArray { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type array only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.params", param.Name)) - } - } } } return errs diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go index df879cd5..ce92c8d8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go @@ -1404,7 +1404,7 @@ func schema_pkg_apis_pipeline_v1beta1_Pipeline(ref common.ReferenceCallback) com return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Pipeline describes a list of Tasks to execute. It expresses how outputs of tasks feed into inputs of subsequent tasks.", + Description: "Pipeline describes a list of Tasks to execute. It expresses how outputs of tasks feed into inputs of subsequent tasks.\n\nDeprecated: Please use v1.Pipeline instead.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -1673,7 +1673,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRun(ref common.ReferenceCallback) return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.", + Description: "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.\n\nDeprecated: Please use v1.PipelineRun instead.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -2632,7 +2632,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref common.ReferenceCallback) }, "timeout": { SchemaProps: spec.SchemaProps{ - Description: "Time after which the TaskRun times out. Defaults to 1 hour. 
Specified TaskRun timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Description: "Time after which the TaskRun times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -4206,7 +4206,7 @@ func schema_pkg_apis_pipeline_v1beta1_Task(ref common.ReferenceCallback) common. return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.\n\nDeprecated: Please use v1.Task instead.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -4549,7 +4549,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRun(ref common.ReferenceCallback) comm return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.", + Description: "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.\n\nDeprecated: Please use v1.TaskRun instead.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -4956,7 +4956,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref common.ReferenceCallback) }, "timeout": { SchemaProps: spec.SchemaProps{ - Description: "Time after which one retry attempt times out. Defaults to 1 hour. Specified build timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Description: "Time after which one retry attempt times out. Defaults to 1 hour. 
Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -6012,7 +6012,7 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestSpec(ref common.Referen Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"), }, }, }, @@ -6022,7 +6022,7 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestSpec(ref common.Referen }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"}, } } @@ -6087,13 +6087,13 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatus(ref common.Refer "source": { SchemaProps: spec.SchemaProps{ Description: "Deprecated: Use RefSource instead", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"), }, }, "refSource": { SchemaProps: spec.SchemaProps{ Description: "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"), }, }, }, @@ -6101,7 +6101,7 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatus(ref common.Refer }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource", "knative.dev/pkg/apis.Condition"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource", "knative.dev/pkg/apis.Condition"}, } } @@ -6123,13 +6123,13 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatusFields(ref common "source": { SchemaProps: spec.SchemaProps{ Description: "Deprecated: Use RefSource instead", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"), }, }, "refSource": { SchemaProps: spec.SchemaProps{ Description: "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"), }, }, }, @@ -6137,6 +6137,6 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatusFields(ref common }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"}, } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go index 18de6bd7..5ac4f50a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go @@ -77,7 +77,8 @@ func (p Param) convertTo(ctx context.Context, sink *v1.Param) { sink.Value = newValue } -func (p *Param) convertFrom(ctx context.Context, source v1.Param) { +// ConvertFrom converts v1beta1 Param from v1 Param +func (p *Param) 
ConvertFrom(ctx context.Context, source v1.Param) { p.Name = source.Name newValue := ParamValue{} newValue.convertFrom(ctx, source.Value) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go index a2d7e78f..c703fcd5 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go @@ -94,6 +94,51 @@ func (pp *ParamSpec) SetDefaults(context.Context) { } } +// getNames returns all the names of the declared parameters +func (ps ParamSpecs) getNames() []string { + var names []string + for _, p := range ps { + names = append(names, p.Name) + } + return names +} + +// sortByType splits the input params into string params, array params, and object params, in that order +func (ps ParamSpecs) sortByType() (ParamSpecs, ParamSpecs, ParamSpecs) { + var stringParams, arrayParams, objectParams ParamSpecs + for _, p := range ps { + switch p.Type { + case ParamTypeArray: + arrayParams = append(arrayParams, p) + case ParamTypeObject: + objectParams = append(objectParams, p) + case ParamTypeString: + fallthrough + default: + stringParams = append(stringParams, p) + } + } + return stringParams, arrayParams, objectParams +} + +// validateNoDuplicateNames returns an error if any of the params have the same name +func (ps ParamSpecs) validateNoDuplicateNames() *apis.FieldError { + names := ps.getNames() + seen := sets.String{} + dups := sets.String{} + var errs *apis.FieldError + for _, n := range names { + if seen.Has(n) { + dups.Insert(n) + } + seen.Insert(n) + } + for n := range dups { + errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", n)) + } + return errs +} + // setDefaultsForProperties sets default type for PropertySpec (string) if it's not specified func (pp *ParamSpec) setDefaultsForProperties() { for key, propertySpec := range pp.Properties { @@ -143,9 +188,9 @@ func (ps Params) extractParamMapArrVals() map[string][]string { return paramsMap } -// extractParamArrayLengths extract and return the lengths of all array params +// ExtractParamArrayLengths extract and return the lengths of all array params // Example of returned value: {"a-array-params": 2,"b-array-params": 2 } -func (ps Params) extractParamArrayLengths() map[string]int { +func (ps Params) ExtractParamArrayLengths() map[string]int { // Collect all array params arrayParamsLengths := make(map[string]int) @@ -171,9 +216,18 @@ func (ps Params) validateDuplicateParameters() (errs *apis.FieldError) { return errs } -// extractParamArrayLengths extract and return the lengths of all array params +// ReplaceVariables applies string, array and object replacements to variables in Params +func (ps Params) ReplaceVariables(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) Params { + params := ps.DeepCopy() + for i := range params { + params[i].Value.ApplyReplacements(stringReplacements, arrayReplacements, objectReplacements) + } + return params +} + +// ExtractDefaultParamArrayLengths extract and return the lengths of all array param defaults // Example of returned value: {"a-array-params": 2,"b-array-params": 2 } -func (ps ParamSpecs) extractParamArrayLengths() map[string]int { +func (ps ParamSpecs) ExtractDefaultParamArrayLengths() map[string]int { // Collect all array params arrayParamsLengths := 
make(map[string]int) @@ -188,30 +242,6 @@ func (ps ParamSpecs) extractParamArrayLengths() map[string]int { return arrayParamsLengths } -// validateOutofBoundArrayParams validates if the array indexing params are out of bound -// example of arrayIndexingParams: ["$(params.a-array-param[1])", "$(params.b-array-param[2])"] -// example of arrayParamsLengths: {"a-array-params": 2,"b-array-params": 2 } -func validateOutofBoundArrayParams(arrayIndexingParams []string, arrayParamsLengths map[string]int) error { - outofBoundParams := sets.String{} - for _, val := range arrayIndexingParams { - indexString := substitution.ExtractIndexString(val) - idx, _ := substitution.ExtractIndex(indexString) - // this will extract the param name from reference - // e.g. $(params.a-array-param[1]) -> a-array-param - paramName, _, _ := substitution.ExtractVariablesFromString(substitution.TrimArrayIndex(val), "params") - - if paramLength, ok := arrayParamsLengths[paramName[0]]; ok { - if idx >= paramLength { - outofBoundParams.Insert(val) - } - } - } - if outofBoundParams.Len() > 0 { - return fmt.Errorf("non-existent param references:%v", outofBoundParams.List()) - } - return nil -} - // extractArrayIndexingParamRefs takes a string of the form `foo-$(params.array-param[1])-bar` and extracts the portions of the string that reference an element in an array param. // For example, for the string “foo-$(params.array-param[1])-bar-$(params.other-array-param[2])-$(params.string-param)`, // it would return ["$(params.array-param[1])", "$(params.other-array-param[2])"]. @@ -569,23 +599,23 @@ func validateParamStringValue(param Param, prefix string, paramNames sets.String // validateStringVariable validates the normal string fields that can only accept references to string param or individual keys of object param func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { - errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs := substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars) errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) - return errs.Also(substitution.ValidateVariableProhibitedP(value, prefix, arrayVars)) + return errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(value, prefix, arrayVars)) } func validateArrayVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { - errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs := substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars) errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) - return errs.Also(substitution.ValidateVariableIsolatedP(value, prefix, arrayVars)) + return errs.Also(substitution.ValidateVariableReferenceIsIsolated(value, prefix, arrayVars)) } func validateObjectVariable(value, prefix string, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { objectNames := sets.NewString() for objectParamName, keys := range objectParamNameKeys { objectNames.Insert(objectParamName) - errs = errs.Also(substitution.ValidateVariableP(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...))) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...))) } - return 
errs.Also(substitution.ValidateEntireVariableProhibitedP(value, prefix, objectNames)) + return errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(value, prefix, objectNames)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go index 0c66a3bd..d6b4c770 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go @@ -20,6 +20,8 @@ import ( "context" "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "knative.dev/pkg/apis" ) @@ -34,20 +36,20 @@ func (p *Pipeline) ConvertTo(ctx context.Context, to apis.Convertible) error { switch sink := to.(type) { case *v1.Pipeline: sink.ObjectMeta = p.ObjectMeta - return p.Spec.ConvertTo(ctx, &sink.Spec) + return p.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", sink) } } // ConvertTo implements apis.Convertible -func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1.PipelineSpec) error { +func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1.PipelineSpec, meta *metav1.ObjectMeta) error { sink.DisplayName = ps.DisplayName sink.Description = ps.Description sink.Tasks = nil for _, t := range ps.Tasks { new := v1.PipelineTask{} - err := t.convertTo(ctx, &new) + err := t.convertTo(ctx, &new, meta) if err != nil { return err } @@ -74,7 +76,7 @@ func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1.PipelineSpec) er sink.Finally = nil for _, f := range ps.Finally { new := v1.PipelineTask{} - err := f.convertTo(ctx, &new) + err := f.convertTo(ctx, &new, meta) if err != nil { return err } @@ -88,20 +90,20 @@ func (p *Pipeline) ConvertFrom(ctx context.Context, from apis.Convertible) error switch source := from.(type) { case *v1.Pipeline: p.ObjectMeta = source.ObjectMeta - return p.Spec.ConvertFrom(ctx, &source.Spec) + return p.Spec.ConvertFrom(ctx, &source.Spec, &p.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", p) } } // ConvertFrom implements apis.Convertible -func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec) error { +func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec, meta *metav1.ObjectMeta) error { ps.DisplayName = source.DisplayName ps.Description = source.Description ps.Tasks = nil for _, t := range source.Tasks { new := PipelineTask{} - err := new.convertFrom(ctx, t) + err := new.convertFrom(ctx, t, meta) if err != nil { return err } @@ -128,7 +130,7 @@ func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec ps.Finally = nil for _, f := range source.Finally { new := PipelineTask{} - err := new.convertFrom(ctx, f) + err := new.convertFrom(ctx, f, meta) if err != nil { return err } @@ -137,7 +139,7 @@ func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec return nil } -func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask) error { +func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask, meta *metav1.ObjectMeta) error { sink.Name = pt.Name sink.DisplayName = pt.DisplayName sink.Description = pt.Description @@ -147,7 +149,7 @@ func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask) err } if pt.TaskSpec != nil { sink.TaskSpec = &v1.EmbeddedTask{} - 
err := pt.TaskSpec.convertTo(ctx, sink.TaskSpec) + err := pt.TaskSpec.convertTo(ctx, sink.TaskSpec, meta, pt.Name) if err != nil { return err } @@ -183,18 +185,18 @@ func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask) err return nil } -func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask) error { +func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask, meta *metav1.ObjectMeta) error { pt.Name = source.Name pt.DisplayName = source.DisplayName pt.Description = source.Description if source.TaskRef != nil { newTaskRef := TaskRef{} - newTaskRef.convertFrom(ctx, *source.TaskRef) + newTaskRef.ConvertFrom(ctx, *source.TaskRef) pt.TaskRef = &newTaskRef } if source.TaskSpec != nil { newTaskSpec := EmbeddedTask{} - err := newTaskSpec.convertFrom(ctx, *source.TaskSpec) + err := newTaskSpec.convertFrom(ctx, *source.TaskSpec, meta, pt.Name) pt.TaskSpec = &newTaskSpec if err != nil { return err @@ -211,7 +213,7 @@ func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask) pt.Params = nil for _, p := range source.Params { new := Param{} - new.convertFrom(ctx, p) + new.ConvertFrom(ctx, p) pt.Params = append(pt.Params, new) } pt.Matrix = nil @@ -231,20 +233,20 @@ func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask) return nil } -func (et EmbeddedTask) convertTo(ctx context.Context, sink *v1.EmbeddedTask) error { +func (et EmbeddedTask) convertTo(ctx context.Context, sink *v1.EmbeddedTask, meta *metav1.ObjectMeta, taskName string) error { sink.TypeMeta = et.TypeMeta sink.Spec = et.Spec sink.Metadata = v1.PipelineTaskMetadata(et.Metadata) sink.TaskSpec = v1.TaskSpec{} - return et.TaskSpec.ConvertTo(ctx, &sink.TaskSpec) + return et.TaskSpec.ConvertTo(ctx, &sink.TaskSpec, meta, taskName) } -func (et *EmbeddedTask) convertFrom(ctx context.Context, source v1.EmbeddedTask) error { +func (et *EmbeddedTask) convertFrom(ctx context.Context, source v1.EmbeddedTask, meta *metav1.ObjectMeta, taskName string) error { et.TypeMeta = source.TypeMeta et.Spec = source.Spec et.Metadata = PipelineTaskMetadata(source.Metadata) et.TaskSpec = TaskSpec{} - return et.TaskSpec.ConvertFrom(ctx, &source.TaskSpec) + return et.TaskSpec.ConvertFrom(ctx, &source.TaskSpec, meta, taskName) } func (we WhenExpression) convertTo(ctx context.Context, sink *v1.WhenExpression) { @@ -278,7 +280,7 @@ func (m *Matrix) convertTo(ctx context.Context, sink *v1.Matrix) { func (m *Matrix) convertFrom(ctx context.Context, source v1.Matrix) { for _, param := range source.Params { new := Param{} - new.convertFrom(ctx, param) + new.ConvertFrom(ctx, param) m.Params = append(m.Params, new) } @@ -286,7 +288,7 @@ func (m *Matrix) convertFrom(ctx context.Context, source v1.Matrix) { m.Include = append(m.Include, IncludeParams{Name: include.Name}) for _, p := range include.Params { new := Param{} - new.convertFrom(ctx, p) + new.ConvertFrom(ctx, p) m.Include[i].Params = append(m.Include[i].Params, new) } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go index efb6607d..4426d5dd 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go @@ -39,10 +39,12 @@ const ( // +genclient:noStatus // +genreconciler:krshapedlogic=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// 
+k8s:openapi-gen=true // Pipeline describes a list of Tasks to execute. It expresses how outputs // of tasks feed into inputs of subsequent tasks. -// +k8s:openapi-gen=true +// +// Deprecated: Please use v1.Pipeline instead. type Pipeline struct { metav1.TypeMeta `json:",inline"` // +optional @@ -215,7 +217,6 @@ type PipelineTask struct { Workspaces []WorkspacePipelineTaskBinding `json:"workspaces,omitempty"` // Time after which the TaskRun times out. Defaults to 1 hour. - // Specified TaskRun timeout should be less than 24h. // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go index c170e85a..6411d3f0 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go @@ -47,8 +47,12 @@ func (p *Pipeline) SupportedVerbs() []admissionregistrationv1.OperationType { // that any references resources exist, that is done at run time. func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { errs := validate.ObjectMetadata(p.GetObjectMeta()).ViaField("metadata") - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false) - return errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + // When a Pipeline is created directly, instead of declared inline in a PipelineRun, + // we do not support propagated parameters and workspaces. + // Validate that all params and workspaces it uses are declared. + errs = errs.Also(p.Spec.validatePipelineParameterUsage(ctx).ViaField("spec")) + return errs.Also(p.Spec.validatePipelineWorkspacesUsage().ViaField("spec")) } // Validate checks that taskNames in the Pipeline are valid and that the graph @@ -72,8 +76,6 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validateExecutionStatusVariables(ps.Tasks, ps.Finally)) // Validate the pipeline's workspaces. errs = errs.Also(validatePipelineWorkspacesDeclarations(ps.Workspaces)) - errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Tasks).ViaField("tasks")) - errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Finally).ViaField("finally")) // Validate the pipeline's results errs = errs.Also(validatePipelineResults(ps.Results, ps.Tasks, ps.Finally)) errs = errs.Also(validateTasksAndFinallySection(ps)) @@ -111,6 +113,16 @@ func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, p return errs } +// validateUsageOfDeclaredPipelineTaskParameters validates that all parameters referenced in the pipeline Task are declared by the pipeline Task. 
+func (l PipelineTaskList) validateUsageOfDeclaredPipelineTaskParameters(ctx context.Context, path string) (errs *apis.FieldError) { + for i, t := range l { + if t.TaskSpec != nil { + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.TaskSpec.Steps, t.TaskSpec.Params).ViaFieldIndex(path, i)) + } + } + return errs +} + // ValidateName checks whether the PipelineTask's name is a valid DNS label func (pt PipelineTask) ValidateName() *apis.FieldError { if err := validation.IsDNS1123Label(pt.Name); len(err) > 0 { @@ -134,12 +146,22 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { if pt.Resources != nil { errs = errs.Also(apis.ErrDisallowedFields("resources")) } - + // taskKinds contains the kinds when the apiVersion is not set, they are not custom tasks, + // if apiVersion is set they are custom tasks. + taskKinds := map[TaskKind]bool{ + "": true, + NamespacedTaskKind: true, + ClusterTaskKind: true, + } cfg := config.FromContextOrDefaults(ctx) // Pipeline task having taskRef/taskSpec with APIVersion is classified as custom task switch { + case pt.TaskRef != nil && !taskKinds[pt.TaskRef.Kind]: + errs = errs.Also(pt.validateCustomTask()) case pt.TaskRef != nil && pt.TaskRef.APIVersion != "": errs = errs.Also(pt.validateCustomTask()) + case pt.TaskSpec != nil && !taskKinds[TaskKind(pt.TaskSpec.Kind)]: + errs = errs.Also(pt.validateCustomTask()) case pt.TaskSpec != nil && pt.TaskSpec.APIVersion != "": errs = errs.Also(pt.validateCustomTask()) // If EnableTektonOCIBundles feature flag is on, validate bundle specifications @@ -148,7 +170,7 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { default: errs = errs.Also(pt.validateTask(ctx)) } - return + return //nolint:nakedret } func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldError) { @@ -157,9 +179,9 @@ func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldErr // when the enable-api-fields feature gate is anything but "alpha". 
errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) errs = errs.Also(pt.Matrix.validateCombinationsCount(ctx)) + errs = errs.Also(pt.Matrix.validateUniqueParams()) } errs = errs.Also(pt.Matrix.validateParameterInOneOfMatrixOrParams(pt.Params)) - errs = errs.Also(pt.Matrix.validateParams()) return errs } @@ -266,24 +288,11 @@ func (pt PipelineTask) validateBundle() (errs *apis.FieldError) { // validateTask validates a pipeline task or a final task for taskRef and taskSpec func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) - // Validate TaskSpec if it's present if pt.TaskSpec != nil { errs = errs.Also(pt.TaskSpec.Validate(ctx).ViaField("taskSpec")) } if pt.TaskRef != nil { - if pt.TaskRef.Name != "" { - // TaskRef name must be a valid k8s name - if errSlice := validation.IsQualifiedName(pt.TaskRef.Name); len(errSlice) != 0 { - errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "taskRef.name")) - } - } else if pt.TaskRef.Resolver == "" { - errs = errs.Also(apis.ErrInvalidValue("taskRef must specify name", "taskRef.name")) - } - // fail if bundle is present when EnableTektonOCIBundles feature flag is off (as it won't be allowed nor used) - if !cfg.FeatureFlags.EnableTektonOCIBundles && pt.TaskRef.Bundle != "" { - errs = errs.Also(apis.ErrDisallowedFields("taskRef.bundle")) - } + errs = errs.Also(pt.TaskRef.Validate(ctx).ViaField("taskRef")) } return errs } @@ -307,12 +316,43 @@ func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration) return errs } -// validatePipelineWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in -// the pipeline -func validatePipelineWorkspacesUsage(ctx context.Context, wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { - if !config.ValidateParameterVariablesAndWorkspaces(ctx) { - return nil +// validatePipelineParameterUsage validates that parameters referenced in the Pipeline are declared by the Pipeline +func (ps *PipelineSpec) validatePipelineParameterUsage(ctx context.Context) (errs *apis.FieldError) { + errs = errs.Also(PipelineTaskList(ps.Tasks).validateUsageOfDeclaredPipelineTaskParameters(ctx, "tasks")) + errs = errs.Also(PipelineTaskList(ps.Finally).validateUsageOfDeclaredPipelineTaskParameters(ctx, "finally")) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.Tasks, ps.Params).ViaField("tasks")) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.Finally, ps.Params).ViaField("finally")) + return errs +} + +// validatePipelineTaskParameterUsage validates that parameters referenced in the Pipeline Tasks are declared by the Pipeline +func validatePipelineTaskParameterUsage(tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) { + allParamNames := sets.NewString(params.getNames()...) + _, arrayParams, objectParams := params.sortByType() + arrayParamNames := sets.NewString(arrayParams.getNames()...) 
+ objectParameterNameKeys := map[string][]string{} + for _, p := range objectParams { + for k := range p.Properties { + objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k) + } } + errs = errs.Also(validatePipelineParametersVariables(tasks, "params", allParamNames, arrayParamNames, objectParameterNameKeys)) + for i, task := range tasks { + errs = errs.Also(task.Params.validateDuplicateParameters().ViaFieldIndex("params", i)) + } + return errs +} + +// validatePipelineWorkspacesUsage validates that Workspaces referenced in the Pipeline are declared by the Pipeline +func (ps *PipelineSpec) validatePipelineWorkspacesUsage() (errs *apis.FieldError) { + errs = errs.Also(validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")) + errs = errs.Also(validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally")) + return errs +} + +// validatePipelineTasksWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in +// the pipeline +func validatePipelineTasksWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { workspaceNames := sets.NewString() for _, ws := range wss { workspaceNames.Insert(ws.Name) @@ -326,33 +366,13 @@ func validatePipelineWorkspacesUsage(ctx context.Context, wss []PipelineWorkspac // ValidatePipelineParameterVariables validates parameters with those specified by each pipeline task, // (1) it validates the type of parameter is either string or array (2) parameter default value matches -// with the type of that param (3) ensures that the referenced param variable is defined is part of the param declarations -func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { - parameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - objectParameterNameKeys := map[string][]string{} - +// with the type of that param +func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) { // validates all the types within a slice of ParamSpecs errs = errs.Also(ValidateParameterTypes(ctx, params).ViaField("params")) - - for _, p := range params { - if parameterNames.Has(p.Name) { - errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) - } - // Add parameter name to parameterNames, and to arrayParameterNames if type is array. 
- parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { - arrayParameterNames.Insert(p.Name) - } - - if p.Type == ParamTypeObject { - for k := range p.Properties { - objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k) - } - } - } - if config.ValidateParameterVariablesAndWorkspaces(ctx) { - errs = errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames, objectParameterNameKeys)) + errs = errs.Also(params.validateNoDuplicateNames()) + for i, task := range tasks { + errs = errs.Also(task.Params.validateDuplicateParameters().ViaField("params").ViaIndex(i)) } return errs } @@ -516,7 +536,7 @@ func validateExecutionStatusVariablesExpressions(expressions []string, ptNames s func validatePipelineContextVariablesInParamValues(paramValues []string, prefix string, contextNames sets.String) (errs *apis.FieldError) { for _, paramValue := range paramValues { - errs = errs.Also(substitution.ValidateVariableP(paramValue, prefix, contextNames).ViaField("value")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(paramValue, prefix, contextNames).ViaField("value")) } return errs } @@ -699,20 +719,11 @@ func validateResultsFromMatrixedPipelineTasksNotConsumed(tasks []PipelineTask, f return errs } -// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. -// error is returned when the array indexing reference is out of bound of the array param -// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. -func (ps *PipelineSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { - if !config.CheckAlphaOrBetaAPIFields(ctx) { - return nil - } - - // Collect all array params lengths - arrayParamsLengths := ps.Params.extractParamArrayLengths() - for k, v := range params.extractParamArrayLengths() { - arrayParamsLengths[k] = v - } - +// GetIndexingReferencesToArrayParams returns all strings referencing indices of PipelineRun array parameters +// from parameters, workspaces, and when expressions defined in the Pipeline's Tasks and Finally Tasks. +// For example, if a Task in the Pipeline has a parameter with a value "$(params.array-param-name[1])", +// this would be one of the strings returned. +func (ps *PipelineSpec) GetIndexingReferencesToArrayParams() sets.String { paramsRefs := []string{} for i := range ps.Tasks { paramsRefs = append(paramsRefs, ps.Tasks[i].Params.extractValues()...) @@ -727,22 +738,20 @@ func (ps *PipelineSpec) ValidateParamArrayIndex(ctx context.Context, params Para paramsRefs = append(paramsRefs, wes.Values...) } } - for i := range ps.Finally { paramsRefs = append(paramsRefs, ps.Finally[i].Params.extractValues()...) if ps.Finally[i].IsMatrixed() { paramsRefs = append(paramsRefs, ps.Finally[i].Matrix.Params.extractValues()...) } for _, wes := range ps.Finally[i].WhenExpressions { + paramsRefs = append(paramsRefs, wes.Input) paramsRefs = append(paramsRefs, wes.Values...) } } - // extract all array indexing references, for example []{"$(params.array-params[1])"} arrayIndexParamRefs := []string{} for _, p := range paramsRefs { arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) } - - return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) + return sets.NewString(arrayIndexParamRefs...) 
} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go index 68a47523..0b3f4527 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go @@ -20,6 +20,8 @@ import ( "context" "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "knative.dev/pkg/apis" ) @@ -34,24 +36,24 @@ func (pr *PipelineRun) ConvertTo(ctx context.Context, to apis.Convertible) error switch sink := to.(type) { case *v1.PipelineRun: sink.ObjectMeta = pr.ObjectMeta - if err := pr.Status.convertTo(ctx, &sink.Status); err != nil { + if err := pr.Status.convertTo(ctx, &sink.Status, &sink.ObjectMeta); err != nil { return err } - return pr.Spec.ConvertTo(ctx, &sink.Spec) + return pr.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", sink) } } // ConvertTo implements apis.Convertible -func (prs PipelineRunSpec) ConvertTo(ctx context.Context, sink *v1.PipelineRunSpec) error { +func (prs PipelineRunSpec) ConvertTo(ctx context.Context, sink *v1.PipelineRunSpec, meta *metav1.ObjectMeta) error { if prs.PipelineRef != nil { sink.PipelineRef = &v1.PipelineRef{} prs.PipelineRef.convertTo(ctx, sink.PipelineRef) } if prs.PipelineSpec != nil { sink.PipelineSpec = &v1.PipelineSpec{} - err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec) + err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec, meta) if err != nil { return err } @@ -94,17 +96,17 @@ func (pr *PipelineRun) ConvertFrom(ctx context.Context, from apis.Convertible) e switch source := from.(type) { case *v1.PipelineRun: pr.ObjectMeta = source.ObjectMeta - if err := pr.Status.convertFrom(ctx, &source.Status); err != nil { + if err := pr.Status.convertFrom(ctx, &source.Status, &pr.ObjectMeta); err != nil { return err } - return pr.Spec.ConvertFrom(ctx, &source.Spec) + return pr.Spec.ConvertFrom(ctx, &source.Spec, &pr.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", pr) } } // ConvertFrom implements apis.Convertible -func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.PipelineRunSpec) error { +func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.PipelineRunSpec, meta *metav1.ObjectMeta) error { if source.PipelineRef != nil { newPipelineRef := PipelineRef{} newPipelineRef.convertFrom(ctx, *source.PipelineRef) @@ -112,7 +114,7 @@ func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.Pipeline } if source.PipelineSpec != nil { newPipelineSpec := PipelineSpec{} - err := newPipelineSpec.ConvertFrom(ctx, source.PipelineSpec) + err := newPipelineSpec.ConvertFrom(ctx, source.PipelineSpec, meta) if err != nil { return err } @@ -121,7 +123,7 @@ func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.Pipeline prs.Params = nil for _, p := range source.Params { new := Param{} - new.convertFrom(ctx, p) + new.ConvertFrom(ctx, p) prs.Params = append(prs.Params, new) } prs.ServiceAccountName = source.TaskRunTemplate.ServiceAccountName @@ -135,7 +137,7 @@ func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.Pipeline prs.Workspaces = nil for _, w := range source.Workspaces { new := WorkspaceBinding{} - new.convertFrom(ctx, w) + new.ConvertFrom(ctx, w) prs.Workspaces = append(prs.Workspaces, new) } 
prs.TaskRunSpecs = nil @@ -206,7 +208,7 @@ func (ptrs *PipelineTaskRunSpec) convertFrom(ctx context.Context, source v1.Pipe ptrs.ComputeResources = source.ComputeResources } -func (prs *PipelineRunStatus) convertTo(ctx context.Context, sink *v1.PipelineRunStatus) error { +func (prs *PipelineRunStatus) convertTo(ctx context.Context, sink *v1.PipelineRunStatus, meta *metav1.ObjectMeta) error { sink.Status = prs.Status sink.StartTime = prs.StartTime sink.CompletionTime = prs.CompletionTime @@ -218,7 +220,7 @@ func (prs *PipelineRunStatus) convertTo(ctx context.Context, sink *v1.PipelineRu } if prs.PipelineSpec != nil { sink.PipelineSpec = &v1.PipelineSpec{} - err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec) + err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec, meta) if err != nil { return err } @@ -244,7 +246,7 @@ func (prs *PipelineRunStatus) convertTo(ctx context.Context, sink *v1.PipelineRu return nil } -func (prs *PipelineRunStatus) convertFrom(ctx context.Context, source *v1.PipelineRunStatus) error { +func (prs *PipelineRunStatus) convertFrom(ctx context.Context, source *v1.PipelineRunStatus, meta *metav1.ObjectMeta) error { prs.Status = source.Status prs.StartTime = source.StartTime prs.CompletionTime = source.CompletionTime @@ -256,7 +258,7 @@ func (prs *PipelineRunStatus) convertFrom(ctx context.Context, source *v1.Pipeli } if source.PipelineSpec != nil { newPipelineSpec := PipelineSpec{} - err := newPipelineSpec.ConvertFrom(ctx, source.PipelineSpec) + err := newPipelineSpec.ConvertFrom(ctx, source.PipelineSpec, meta) if err != nil { return err } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go index 86592824..0b3efc8f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go @@ -18,19 +18,32 @@ package v1beta1 import ( "context" + "regexp" "time" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline" pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" + "knative.dev/pkg/kmap" ) -var _ apis.Defaultable = (*PipelineRun)(nil) +var ( + _ apis.Defaultable = (*PipelineRun)(nil) + filterReservedAnnotationRegexp = regexp.MustCompile(pipeline.TektonReservedAnnotationExpr) +) // SetDefaults implements apis.Defaultable func (pr *PipelineRun) SetDefaults(ctx context.Context) { pr.Spec.SetDefaults(ctx) + + // Silently filtering out Tekton Reserved annotations at creation + if apis.IsInCreate(ctx) { + pr.ObjectMeta.Annotations = kmap.Filter(pr.ObjectMeta.Annotations, func(s string) bool { + return filterReservedAnnotationRegexp.MatchString(s) + }) + } } // SetDefaults implements apis.Defaultable diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go index 53c1f738..4c7ae064 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go @@ -38,6 +38,7 @@ import ( // +genclient // +genreconciler:krshapedlogic=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true // PipelineRun represents a single execution of a 
Pipeline. PipelineRuns are how // the graph of Tasks declared in a Pipeline are executed; they specify inputs @@ -45,7 +46,7 @@ import ( // Tasks execution such as service account and tolerations. Creating a // PipelineRun creates TaskRuns for Tasks in the referenced Pipeline. // -// +k8s:openapi-gen=true +// Deprecated: Please use v1.PipelineRun instead. type PipelineRun struct { metav1.TypeMeta `json:",inline"` // +optional diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go index ed7ac5cc..307f7b8f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go @@ -71,7 +71,6 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) // Validate PipelineSpec if it's present if ps.PipelineSpec != nil { - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true) errs = errs.Also(ps.PipelineSpec.Validate(ctx).ViaField("pipelineSpec")) } @@ -232,12 +231,12 @@ func (ps *PipelineRunSpec) validateInlineParameters(ctx context.Context) (errs * for _, pt := range ps.PipelineSpec.Tasks { if pt.TaskSpec != nil && pt.TaskSpec.Steps != nil { errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) - errs = errs.Also(ValidateParameterVariables( - config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), pt.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateParameterVariables(ctx, pt.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, pt.TaskSpec.Steps, paramSpec)) } } - errs = errs.Also(ValidatePipelineParameterVariables( - config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), ps.PipelineSpec.Tasks, paramSpec)) + errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.PipelineSpec.Tasks, paramSpec)) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.PipelineSpec.Tasks, paramSpec)) } return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go index 3bbed850..93c6b7d2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go @@ -37,7 +37,7 @@ func (rr *ResolverRef) convertFrom(ctx context.Context, source v1.ResolverRef) { rr.Params = nil for _, r := range source.Params { new := Param{} - new.convertFrom(ctx, r) + new.ConvertFrom(ctx, r) rr.Params = append(rr.Params, new) } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go index 5e0facad..c695de10 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go @@ -26,20 +26,24 @@ func (r TaskResult) convertTo(ctx context.Context, sink *v1.TaskResult) { sink.Name = r.Name sink.Type = v1.ResultsType(r.Type) sink.Description = r.Description - properties := make(map[string]v1.PropertySpec) - for k, v := range r.Properties { - properties[k] = v1.PropertySpec{Type: v1.ParamType(v.Type)} + if r.Properties != nil { + properties := 
make(map[string]v1.PropertySpec) + for k, v := range r.Properties { + properties[k] = v1.PropertySpec{Type: v1.ParamType(v.Type)} + } + sink.Properties = properties } - sink.Properties = properties } func (r *TaskResult) convertFrom(ctx context.Context, source v1.TaskResult) { r.Name = source.Name r.Type = ResultsType(source.Type) r.Description = source.Description - properties := make(map[string]PropertySpec) - for k, v := range source.Properties { - properties[k] = PropertySpec{Type: ParamType(v.Type)} + if source.Properties != nil { + properties := make(map[string]PropertySpec) + for k, v := range source.Properties { + properties[k] = PropertySpec{Type: ParamType(v.Type)} + } + r.Properties = properties } - r.Properties = properties } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go index fe2fca41..ab9ee83c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go @@ -17,8 +17,6 @@ import ( "context" "fmt" - "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/version" "knative.dev/pkg/apis" ) @@ -29,14 +27,10 @@ func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { } switch { - // Object results is a beta feature - make sure the feature flag is set to "beta" case tr.Type == ResultsTypeObject: errs := validateObjectResult(tr) - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.BetaAPIFields)) return errs - // Array results is a beta feature - make sure the feature flag is set to "beta" case tr.Type == ResultsTypeArray: - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.BetaAPIFields)) return errs // Resources created before the result. Type was introduced may not have Type set // and should be considered valid diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json index c0b8e277..2bbb8926 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json @@ -680,7 +680,7 @@ } }, "v1beta1.Pipeline": { - "description": "Pipeline describes a list of Tasks to execute. It expresses how outputs of tasks feed into inputs of subsequent tasks.", + "description": "Pipeline describes a list of Tasks to execute. It expresses how outputs of tasks feed into inputs of subsequent tasks.\n\nDeprecated: Please use v1.Pipeline instead.", "type": "object", "properties": { "apiVersion": { @@ -834,7 +834,7 @@ } }, "v1beta1.PipelineRun": { - "description": "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.", + "description": "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. 
Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.\n\nDeprecated: Please use v1.PipelineRun instead.", "type": "object", "properties": { "apiVersion": { @@ -1314,7 +1314,7 @@ "$ref": "#/definitions/v1beta1.EmbeddedTask" }, "timeout": { - "description": "Time after which the TaskRun times out. Defaults to 1 hour. Specified TaskRun timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "description": "Time after which the TaskRun times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", "$ref": "#/definitions/v1.Duration" }, "when": { @@ -1630,7 +1630,7 @@ "type": "array", "items": { "default": {}, - "$ref": "#/definitions/v1beta1.Param" + "$ref": "#/definitions/v1.Param" }, "x-kubernetes-list-type": "atomic" } @@ -1675,11 +1675,11 @@ }, "refSource": { "description": "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", - "$ref": "#/definitions/v1beta1.RefSource" + "$ref": "#/definitions/v1.RefSource" }, "source": { "description": "Deprecated: Use RefSource instead", - "$ref": "#/definitions/v1beta1.ConfigSource" + "$ref": "#/definitions/v1.RefSource" } } }, @@ -1699,11 +1699,11 @@ }, "refSource": { "description": "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", - "$ref": "#/definitions/v1beta1.RefSource" + "$ref": "#/definitions/v1.RefSource" }, "source": { "description": "Deprecated: Use RefSource instead", - "$ref": "#/definitions/v1beta1.ConfigSource" + "$ref": "#/definitions/v1.RefSource" } } }, @@ -2329,7 +2329,7 @@ } }, "v1beta1.Task": { - "description": "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + "description": "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.\n\nDeprecated: Please use v1.Task instead.", "type": "object", "properties": { "apiVersion": { @@ -2515,7 +2515,7 @@ } }, "v1beta1.TaskRun": { - "description": "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.", + "description": "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.\n\nDeprecated: Please use v1.TaskRun instead.", "type": "object", "properties": { "apiVersion": { @@ -2756,7 +2756,7 @@ "$ref": "#/definitions/v1beta1.TaskSpec" }, "timeout": { - "description": "Time after which one retry attempt times out. Defaults to 1 hour. Specified build timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "description": "Time after which one retry attempt times out. Defaults to 1 hour. 
Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", "$ref": "#/definitions/v1.Duration" }, "workspaces": { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go index 19bbec3f..694e72dd 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go @@ -18,12 +18,43 @@ package v1beta1 import ( "context" + "encoding/json" "fmt" + "github.com/tektoncd/pipeline/pkg/apis/version" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "knative.dev/pkg/apis" ) +// TaskDeprecationsAnnotationKey is the annotation key for all deprecated fields of (a) Task(s) that belong(s) to an object. +// For example: a v1beta1.Pipeline contains two tasks +// +// spec: +// +// tasks: +// - name: task-1 +// stepTemplate: +// name: deprecated-name-field # deprecated field +// - name: task-2 +// steps: +// - tty: true # deprecated field +// +// The annotation would be: +// +// "tekton.dev/v1beta1.task-deprecations": `{ +// "task1":{ +// "deprecatedStepTemplates":{ +// "name":"deprecated-name-field" +// }, +// }, +// "task-2":{ +// "deprecatedSteps":[{"tty":true}], +// }, +// }` +const TaskDeprecationsAnnotationKey = "tekton.dev/v1beta1.task-deprecations" + var _ apis.Convertible = (*Task)(nil) // ConvertTo implements apis.Convertible @@ -34,14 +65,17 @@ func (t *Task) ConvertTo(ctx context.Context, to apis.Convertible) error { switch sink := to.(type) { case *v1.Task: sink.ObjectMeta = t.ObjectMeta - return t.Spec.ConvertTo(ctx, &sink.Spec) + return t.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta, t.Name) default: return fmt.Errorf("unknown version, got: %T", sink) } } // ConvertTo implements apis.Convertible -func (ts *TaskSpec) ConvertTo(ctx context.Context, sink *v1.TaskSpec) error { +func (ts *TaskSpec) ConvertTo(ctx context.Context, sink *v1.TaskSpec, meta *metav1.ObjectMeta, taskName string) error { + if err := serializeTaskDeprecations(meta, ts, taskName); err != nil { + return err + } sink.Steps = nil for _, s := range ts.Steps { new := v1.Step{} @@ -91,14 +125,14 @@ func (t *Task) ConvertFrom(ctx context.Context, from apis.Convertible) error { switch source := from.(type) { case *v1.Task: t.ObjectMeta = source.ObjectMeta - return t.Spec.ConvertFrom(ctx, &source.Spec) + return t.Spec.ConvertFrom(ctx, &source.Spec, &t.ObjectMeta, t.Name) default: return fmt.Errorf("unknown version, got: %T", t) } } // ConvertFrom implements apis.Convertible -func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1.TaskSpec) error { +func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1.TaskSpec, meta *metav1.ObjectMeta, taskName string) error { ts.Steps = nil for _, s := range source.Steps { new := Step{} @@ -111,6 +145,9 @@ func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1.TaskSpec) error new.convertFrom(ctx, source.StepTemplate) ts.StepTemplate = &new } + if err := deserializeTaskDeprecations(meta, ts, taskName); err != nil { + return err + } ts.Sidecars = nil for _, s := range source.Sidecars { new := Sidecar{} @@ -139,3 +176,140 @@ func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1.TaskSpec) error ts.Description = source.Description return nil } + +// taskDeprecation contains deprecated fields of a Task +// +k8s:openapi-gen=false +type 
taskDeprecation struct { + // DeprecatedSteps contains Steps of a Task that with deprecated fields defined. + // +listType=atomic + DeprecatedSteps []Step `json:"deprecatedSteps,omitempty"` + // DeprecatedStepTemplate contains stepTemplate of a Task that with deprecated fields defined. + DeprecatedStepTemplate *StepTemplate `json:"deprecatedStepTemplate,omitempty"` +} + +// taskDeprecations contains deprecated fields of Tasks that belong to the same Pipeline or PipelineRun +// the key is Task name +// +k8s:openapi-gen=false +type taskDeprecations map[string]taskDeprecation + +// serializeTaskDeprecations appends the current Task's deprecation info to annotation of the object. +// The object could be Task, TaskRun, Pipeline or PipelineRun +func serializeTaskDeprecations(meta *metav1.ObjectMeta, spec *TaskSpec, taskName string) error { + var taskDeprecation *taskDeprecation + if spec.HasDeprecatedFields() { + taskDeprecation = retrieveTaskDeprecation(spec) + } + var existingDeprecations = taskDeprecations{} + if str, ok := meta.Annotations[TaskDeprecationsAnnotationKey]; ok { + if err := json.Unmarshal([]byte(str), &existingDeprecations); err != nil { + return fmt.Errorf("error deserializing key %s from metadata: %w", TaskDeprecationsAnnotationKey, err) + } + } + if taskDeprecation != nil { + existingDeprecations[taskName] = *taskDeprecation + return version.SerializeToMetadata(meta, existingDeprecations, TaskDeprecationsAnnotationKey) + } + return nil +} + +// deserializeTaskDeprecations retrieves deprecation info of the Task from object annotation. +// The object could be Task, TaskRun, Pipeline or PipelineRun. +func deserializeTaskDeprecations(meta *metav1.ObjectMeta, spec *TaskSpec, taskName string) error { + var existingDeprecations = taskDeprecations{} + if meta == nil || meta.Annotations == nil { + return nil + } + if str, ok := meta.Annotations[TaskDeprecationsAnnotationKey]; ok { + if err := json.Unmarshal([]byte(str), &existingDeprecations); err != nil { + return fmt.Errorf("error deserializing key %s from metadata: %w", TaskDeprecationsAnnotationKey, err) + } + } + if td, ok := existingDeprecations[taskName]; ok { + if len(spec.Steps) != len(td.DeprecatedSteps) { + return fmt.Errorf("length of deserialized steps mismatch the length of target steps") + } + for i := 0; i < len(spec.Steps); i++ { + spec.Steps[i].DeprecatedPorts = td.DeprecatedSteps[i].DeprecatedPorts + spec.Steps[i].DeprecatedLivenessProbe = td.DeprecatedSteps[i].DeprecatedLivenessProbe + spec.Steps[i].DeprecatedReadinessProbe = td.DeprecatedSteps[i].DeprecatedReadinessProbe + spec.Steps[i].DeprecatedStartupProbe = td.DeprecatedSteps[i].DeprecatedStartupProbe + spec.Steps[i].DeprecatedLifecycle = td.DeprecatedSteps[i].DeprecatedLifecycle + spec.Steps[i].DeprecatedTerminationMessagePath = td.DeprecatedSteps[i].DeprecatedTerminationMessagePath + spec.Steps[i].DeprecatedTerminationMessagePolicy = td.DeprecatedSteps[i].DeprecatedTerminationMessagePolicy + spec.Steps[i].DeprecatedStdin = td.DeprecatedSteps[i].DeprecatedStdin + spec.Steps[i].DeprecatedStdinOnce = td.DeprecatedSteps[i].DeprecatedStdinOnce + spec.Steps[i].DeprecatedTTY = td.DeprecatedSteps[i].DeprecatedTTY + } + if td.DeprecatedStepTemplate != nil { + if spec.StepTemplate == nil { + spec.StepTemplate = &StepTemplate{} + } + spec.StepTemplate.DeprecatedName = td.DeprecatedStepTemplate.DeprecatedName + spec.StepTemplate.DeprecatedPorts = td.DeprecatedStepTemplate.DeprecatedPorts + spec.StepTemplate.DeprecatedLivenessProbe = 
td.DeprecatedStepTemplate.DeprecatedLivenessProbe + spec.StepTemplate.DeprecatedReadinessProbe = td.DeprecatedStepTemplate.DeprecatedReadinessProbe + spec.StepTemplate.DeprecatedStartupProbe = td.DeprecatedStepTemplate.DeprecatedStartupProbe + spec.StepTemplate.DeprecatedLifecycle = td.DeprecatedStepTemplate.DeprecatedLifecycle + spec.StepTemplate.DeprecatedTerminationMessagePath = td.DeprecatedStepTemplate.DeprecatedTerminationMessagePath + spec.StepTemplate.DeprecatedTerminationMessagePolicy = td.DeprecatedStepTemplate.DeprecatedTerminationMessagePolicy + spec.StepTemplate.DeprecatedStdin = td.DeprecatedStepTemplate.DeprecatedStdin + spec.StepTemplate.DeprecatedStdinOnce = td.DeprecatedStepTemplate.DeprecatedStdinOnce + spec.StepTemplate.DeprecatedTTY = td.DeprecatedStepTemplate.DeprecatedTTY + } + delete(existingDeprecations, taskName) + if len(existingDeprecations) == 0 { + delete(meta.Annotations, TaskDeprecationsAnnotationKey) + } else { + updatedDeprecations, err := json.Marshal(existingDeprecations) + if err != nil { + return err + } + meta.Annotations[TaskDeprecationsAnnotationKey] = string(updatedDeprecations) + } + if len(meta.Annotations) == 0 { + meta.Annotations = nil + } + } + return nil +} + +func retrieveTaskDeprecation(spec *TaskSpec) *taskDeprecation { + if !spec.HasDeprecatedFields() { + return nil + } + ds := []Step{} + for _, s := range spec.Steps { + ds = append(ds, Step{ + DeprecatedPorts: s.DeprecatedPorts, + DeprecatedLivenessProbe: s.DeprecatedLivenessProbe, + DeprecatedReadinessProbe: s.DeprecatedReadinessProbe, + DeprecatedStartupProbe: s.DeprecatedStartupProbe, + DeprecatedLifecycle: s.DeprecatedLifecycle, + DeprecatedTerminationMessagePath: s.DeprecatedTerminationMessagePath, + DeprecatedTerminationMessagePolicy: s.DeprecatedTerminationMessagePolicy, + DeprecatedStdin: s.DeprecatedStdin, + DeprecatedStdinOnce: s.DeprecatedStdinOnce, + DeprecatedTTY: s.DeprecatedTTY, + }) + } + var dst *StepTemplate + if spec.StepTemplate != nil { + dst = &StepTemplate{ + DeprecatedName: spec.StepTemplate.DeprecatedName, + DeprecatedPorts: spec.StepTemplate.DeprecatedPorts, + DeprecatedLivenessProbe: spec.StepTemplate.DeprecatedLivenessProbe, + DeprecatedReadinessProbe: spec.StepTemplate.DeprecatedReadinessProbe, + DeprecatedStartupProbe: spec.StepTemplate.DeprecatedStartupProbe, + DeprecatedLifecycle: spec.StepTemplate.DeprecatedLifecycle, + DeprecatedTerminationMessagePath: spec.StepTemplate.DeprecatedTerminationMessagePath, + DeprecatedTerminationMessagePolicy: spec.StepTemplate.DeprecatedTerminationMessagePolicy, + DeprecatedStdin: spec.StepTemplate.DeprecatedStdin, + DeprecatedStdinOnce: spec.StepTemplate.DeprecatedStdinOnce, + DeprecatedTTY: spec.StepTemplate.DeprecatedTTY, + } + } + return &taskDeprecation{ + DeprecatedSteps: ds, + DeprecatedStepTemplate: dst, + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go index 850929d0..85530925 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go @@ -28,13 +28,14 @@ import ( // +genclient:noStatus // +genreconciler:krshapedlogic=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true // Task represents a collection of sequential steps that are run as part of a // Pipeline using a set of inputs and producing a set of outputs. 
Tasks execute // when TaskRuns are created that provide the input parameters and resources and // output resources the Task requires. // -// +k8s:openapi-gen=true +// Deprecated: Please use v1.Task instead. type Task struct { metav1.TypeMeta `json:",inline"` // +optional @@ -130,3 +131,41 @@ type TaskList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []Task `json:"items"` } + +// HasDeprecatedFields returns true if the TaskSpec has deprecated field specified. +func (ts *TaskSpec) HasDeprecatedFields() bool { + if ts == nil { + return false + } + if len(ts.Steps) > 0 { + for _, s := range ts.Steps { + if len(s.DeprecatedPorts) > 0 || + s.DeprecatedLivenessProbe != nil || + s.DeprecatedReadinessProbe != nil || + s.DeprecatedStartupProbe != nil || + s.DeprecatedLifecycle != nil || + s.DeprecatedTerminationMessagePath != "" || + s.DeprecatedTerminationMessagePolicy != "" || + s.DeprecatedStdin || + s.DeprecatedStdinOnce || + s.DeprecatedTTY { + return true + } + } + } + if ts.StepTemplate != nil { + if len(ts.StepTemplate.DeprecatedPorts) > 0 || + ts.StepTemplate.DeprecatedName != "" || + ts.StepTemplate.DeprecatedReadinessProbe != nil || + ts.StepTemplate.DeprecatedStartupProbe != nil || + ts.StepTemplate.DeprecatedLifecycle != nil || + ts.StepTemplate.DeprecatedTerminationMessagePath != "" || + ts.StepTemplate.DeprecatedTerminationMessagePolicy != "" || + ts.StepTemplate.DeprecatedStdin || + ts.StepTemplate.DeprecatedStdinOnce || + ts.StepTemplate.DeprecatedTTY { + return true + } + } + return false +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go index e76f3421..aec142cf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go @@ -62,8 +62,10 @@ var objectVariableNameFormatRegex = regexp.MustCompile(objectVariableNameFormat) // Validate implements apis.Validatable func (t *Task) Validate(ctx context.Context) *apis.FieldError { errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false) - return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + // When a Task is created directly, instead of declared inline in a TaskRun or PipelineRun, + // we do not support propagated parameters. Validate that all params it uses are declared. + return errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.Spec.Steps, t.Spec.Params).ViaField("spec")) } // Validate implements apis.Validatable @@ -72,14 +74,6 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(apis.ErrMissingField("steps")) } - if config.IsSubstituted(ctx) { - // Validate the task's workspaces only. 
- errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) - errs = errs.Also(validateWorkspaceUsages(ctx, ts)) - - return errs - } - errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes")) errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) errs = errs.Also(validateWorkspaceUsages(ctx, ts)) @@ -105,6 +99,28 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { return errs } +// ValidateUsageOfDeclaredParameters validates that all parameters referenced in the Task are declared by the Task. +func ValidateUsageOfDeclaredParameters(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError { + var errs *apis.FieldError + _, _, objectParams := params.sortByType() + allParameterNames := sets.NewString(params.getNames()...) + errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) + errs = errs.Also(validateObjectUsage(ctx, steps, objectParams)) + errs = errs.Also(validateObjectParamsHaveProperties(ctx, params)) + return errs +} + +// validateObjectParamsHaveProperties returns an error if any declared object params are missing properties +func validateObjectParamsHaveProperties(ctx context.Context, params ParamSpecs) *apis.FieldError { + var errs *apis.FieldError + for _, p := range params { + if p.Type == ParamTypeObject && p.Properties == nil { + errs = errs.Also(apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name))) + } + } + return errs +} + func validateSidecarNames(sidecars []Sidecar) (errs *apis.FieldError) { for _, sc := range sidecars { if sc.Name == pipeline.ReservedResultsSidecarName { @@ -160,8 +176,12 @@ func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, // validateWorkspaceUsages checks that all WorkspaceUsage objects in Steps // refer to workspaces that are defined in the Task. // -// This is an alpha feature and will fail validation if it's used by a step -// or sidecar when the enable-api-fields feature gate is anything but "alpha". +// This is a beta feature and will fail validation if it's used by a step +// or sidecar when the enable-api-fields feature gate is anything but "beta". 
+// +// Note that this feature reached beta after the v1 API version has been released and +// consequently it is *not* implicitly enabled on the v1beta1 API to avoid suffering +// from the issues described in TEP-0138 https://github.com/tektoncd/community/pull/1034 func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.FieldError) { workspaces := ts.Workspaces steps := ts.Steps @@ -174,7 +194,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for stepIdx, step := range steps { if len(step.Workspaces) != 0 { - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.BetaAPIFields).ViaIndex(stepIdx).ViaField("steps")) } for workspaceIdx, w := range step.Workspaces { if !wsNames.Has(w.Name) { @@ -185,7 +205,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for sidecarIdx, sidecar := range sidecars { if len(sidecar.Workspaces) != 0 { - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.BetaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) } for workspaceIdx, w := range sidecar.Workspaces { if !wsNames.Has(w.Name) { @@ -297,11 +317,6 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi // ValidateParameterTypes validates all the types within a slice of ParamSpecs func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) { for _, p := range params { - if p.Type == ParamTypeObject { - // Object type parameter is a beta feature and will fail validation if it's used in a task spec - // when the enable-api-fields feature gate is not "alpha" or "beta". - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) - } errs = errs.Also(p.ValidateType(ctx)) } return errs @@ -340,14 +355,6 @@ func (p ParamSpec) ValidateType(ctx context.Context) *apis.FieldError { // definition of `properties` section and the type of a PropertySpec is allowed. // (Currently, only string is allowed) func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError { - if p.Type == ParamTypeObject && p.Properties == nil { - // If this we are not skipping validation checks due to propagated params - // then properties field is required. 
- if config.ValidateParameterVariablesAndWorkspaces(ctx) { - return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) - } - } - invalidKeys := []string{} for key, propertySpec := range p.Properties { if propertySpec.Type != ParamTypeString { @@ -366,38 +373,17 @@ func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError { } // ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps -func ValidateParameterVariables(ctx context.Context, steps []Step, params []ParamSpec) *apis.FieldError { - allParameterNames := sets.NewString() - stringParameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - objectParamSpecs := []ParamSpec{} +func ValidateParameterVariables(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError { var errs *apis.FieldError - for _, p := range params { - // validate no duplicate names - if allParameterNames.Has(p.Name) { - errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) - } - allParameterNames.Insert(p.Name) - - switch p.Type { - case ParamTypeArray: - arrayParameterNames.Insert(p.Name) - case ParamTypeObject: - objectParamSpecs = append(objectParamSpecs, p) - case ParamTypeString: - fallthrough - default: - stringParameterNames.Insert(p.Name) - } - } - errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) - if config.ValidateParameterVariablesAndWorkspaces(ctx) { - errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) - errs = errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) - } + errs = errs.Also(params.validateNoDuplicateNames()) + stringParams, arrayParams, objectParams := params.sortByType() + stringParameterNames := sets.NewString(stringParams.getNames()...) + arrayParameterNames := sets.NewString(arrayParams.getNames()...) + errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParams)) return errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) } +// validateTaskContextVariables returns an error if any Steps reference context variables that don't exist. func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError { taskRunContextNames := sets.NewString().Insert( "name", @@ -419,7 +405,7 @@ func validateTaskResultsVariables(ctx context.Context, steps []Step, results []T resultsNames.Insert(r.Name) } for idx, step := range steps { - errs = errs.Also(validateTaskVariable(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx)) } return errs } @@ -444,8 +430,7 @@ func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) return errs.Also(validateObjectUsageAsWhole(steps, "params", objectParameterNames)) } -// validateObjectUsageAsWhole makes sure the object params are not used as whole when providing values for strings -// i.e. 
param.objectParam, param.objectParam[*] +// validateObjectUsageAsWhole returns an error if the Steps contain references to the entire input object params in fields where these references are prohibited func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { for idx, step := range steps { errs = errs.Also(validateStepObjectUsageAsWhole(step, prefix, vars)).ViaFieldIndex("steps", idx) @@ -453,57 +438,61 @@ func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) ( return errs } +// validateStepObjectUsageAsWhole returns an error if the Step contains references to the entire input object params in fields where these references are prohibited func validateStepObjectUsageAsWhole(step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskNoObjectReferenced(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskNoObjectReferenced(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskNoObjectReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskNoObjectReferenced(step.Script, prefix, vars).ViaField("script")) + errs := substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Script, prefix, vars).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskNoObjectReferenced(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(cmd, prefix, vars).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskNoObjectReferenced(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(arg, prefix, vars).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskNoObjectReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskNoObjectReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoObjectReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoObjectReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) } return errs } -func validateArrayUsage(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { +// validateArrayUsage returns an error if the Steps contain 
references to the input array params in fields where these references are prohibited +func validateArrayUsage(steps []Step, prefix string, arrayParamNames sets.String) (errs *apis.FieldError) { for idx, step := range steps { - errs = errs.Also(validateStepArrayUsage(step, prefix, vars)).ViaFieldIndex("steps", idx) + errs = errs.Also(validateStepArrayUsage(step, prefix, arrayParamNames)).ViaFieldIndex("steps", idx) } return errs } -func validateStepArrayUsage(step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskNoArrayReferenced(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskNoArrayReferenced(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskNoArrayReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskNoArrayReferenced(step.Script, prefix, vars).ViaField("script")) +// validateStepArrayUsage returns an error if the Step contains references to the input array params in fields where these references are prohibited +func validateStepArrayUsage(step Step, prefix string, arrayParamNames sets.String) *apis.FieldError { + errs := substitution.ValidateNoReferencesToProhibitedVariables(step.Name, prefix, arrayParamNames).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.Image, prefix, arrayParamNames).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.WorkingDir, prefix, arrayParamNames).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.Script, prefix, arrayParamNames).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskArraysIsolated(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(cmd, prefix, arrayParamNames).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskArraysIsolated(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(arg, prefix, arrayParamNames).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskNoArrayReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(env.Value, prefix, arrayParamNames).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskNoArrayReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoArrayReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoArrayReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.Name, prefix, arrayParamNames).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.MountPath, prefix, arrayParamNames).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.SubPath, prefix, arrayParamNames).ViaField("subPath").ViaFieldIndex("volumeMount", i)) } return errs } +// validateVariables returns an error if the Steps contain references to any unknown variables func validateVariables(ctx context.Context, steps []Step, prefix string, vars 
sets.String) (errs *apis.FieldError) { // We've checked param name format. Now, we want to check if param names are referenced correctly in each step for idx, step := range steps { @@ -561,68 +550,41 @@ func validateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSp return errs } +// validateStepVariables returns an error if the Step contains references to any unknown variables func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskVariable(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskVariable(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskVariable(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskVariable(step.Script, prefix, vars).ViaField("script")) + errs := substitution.ValidateNoReferencesToUnknownVariables(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, prefix, vars).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskVariable(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(cmd, prefix, vars).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskVariable(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(arg, prefix, vars).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskVariable(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskVariable(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskVariable(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskVariable(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) } - errs = errs.Also(validateTaskVariable(string(step.OnError), prefix, vars).ViaField("onError")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(string(step.OnError), prefix, vars).ViaField("onError")) return errs } -func validateTaskVariable(value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariableP(value, prefix, vars) -} - -func validateTaskNoObjectReferenced(value, prefix string, objectNames sets.String) *apis.FieldError { - return substitution.ValidateEntireVariableProhibitedP(value, prefix, objectNames) -} - -func validateTaskNoArrayReferenced(value, prefix string, arrayNames sets.String) 
*apis.FieldError { - return substitution.ValidateVariableProhibitedP(value, prefix, arrayNames) -} - -func validateTaskArraysIsolated(value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableIsolatedP(value, prefix, arrayNames) -} - // isParamRefs attempts to check if a specified string looks like it contains any parameter reference // This is useful to make sure the specified value looks like a Parameter Reference before performing any strict validation func isParamRefs(s string) bool { return strings.HasPrefix(s, "$("+ParamsPrefix) } -// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. -// error is returned when the array indexing reference is out of bound of the array param -// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. -// - `trParams` are params from taskrun. -// - `taskSpec` contains params declarations. -func (ts *TaskSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { - cfg := config.FromContextOrDefaults(ctx) - if cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { - return nil - } - - // Collect all array params lengths - arrayParamsLengths := ts.Params.extractParamArrayLengths() - for k, v := range params.extractParamArrayLengths() { - arrayParamsLengths[k] = v - } - +// GetIndexingReferencesToArrayParams returns all strings referencing indices of TaskRun array parameters +// from parameters, workspaces, and when expressions defined in the Task. +// For example, if a Task has a parameter with a value "$(params.array-param-name[1])", +// this would be one of the strings returned. +func (ts *TaskSpec) GetIndexingReferencesToArrayParams() sets.String { // collect all the possible places to use param references paramsRefs := []string{} paramsRefs = append(paramsRefs, extractParamRefsFromSteps(ts.Steps)...) @@ -632,12 +594,10 @@ func (ts *TaskSpec) ValidateParamArrayIndex(ctx context.Context, params Params) paramsRefs = append(paramsRefs, v.MountPath) } paramsRefs = append(paramsRefs, extractParamRefsFromSidecars(ts.Sidecars)...) - // extract all array indexing references, for example []{"$(params.array-params[1])"} arrayIndexParamRefs := []string{} for _, p := range paramsRefs { arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) } - - return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) + return sets.NewString(arrayIndexParamRefs...) 
} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go index e8e69519..ab5f2a56 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go @@ -34,7 +34,8 @@ func (tr TaskRef) convertTo(ctx context.Context, sink *v1.TaskRef) { tr.convertBundleToResolver(sink) } -func (tr *TaskRef) convertFrom(ctx context.Context, source v1.TaskRef) { +// ConvertFrom converts v1beta1 TaskRef from v1 TaskRef +func (tr *TaskRef) ConvertFrom(ctx context.Context, source v1.TaskRef) { tr.Name = source.Name tr.Kind = TaskKind(source.Kind) tr.APIVersion = source.APIVersion diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go index 02971399..4e42b7aa 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go @@ -18,8 +18,10 @@ package v1beta1 import ( "context" + "strings" "github.com/google/go-containerregistry/pkg/name" + "k8s.io/apimachinery/pkg/util/validation" "knative.dev/pkg/apis" ) @@ -30,7 +32,8 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { return } - if ref.Resolver != "" || ref.Params != nil { + switch { + case ref.Resolver != "" || ref.Params != nil: if ref.Resolver != "" { if ref.Name != "" { errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) @@ -51,16 +54,21 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { } errs = errs.Also(ValidateParameters(ctx, ref.Params)) } - } else { + case ref.Bundle != "": if ref.Name == "" { errs = errs.Also(apis.ErrMissingField("name")) } - if ref.Bundle != "" { - errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) - if _, err := name.ParseReference(ref.Bundle); err != nil { - errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) - } + errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) + if _, err := name.ParseReference(ref.Bundle); err != nil { + errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) + } + default: + if ref.Name == "" { + errs = errs.Also(apis.ErrMissingField("name")) + } else if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 { + // TaskRef name must be a valid k8s name + errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name")) } } - return //nolint:nakedret + return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go index b2745ec5..d97da847 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go @@ -44,17 +44,17 @@ func (tr *TaskRun) ConvertTo(ctx context.Context, to apis.Convertible) error { if err := serializeTaskRunCloudEvents(&sink.ObjectMeta, &tr.Status); err != nil { return err } - if err := tr.Status.ConvertTo(ctx, &sink.Status); err != nil { + if err := tr.Status.ConvertTo(ctx, &sink.Status, &sink.ObjectMeta); err != nil { return err 
} - return tr.Spec.ConvertTo(ctx, &sink.Spec) + return tr.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", sink) } } // ConvertTo implements apis.Convertible -func (trs *TaskRunSpec) ConvertTo(ctx context.Context, sink *v1.TaskRunSpec) error { +func (trs *TaskRunSpec) ConvertTo(ctx context.Context, sink *v1.TaskRunSpec, meta *metav1.ObjectMeta) error { if trs.Debug != nil { sink.Debug = &v1.TaskRunDebug{} trs.Debug.convertTo(ctx, sink.Debug) @@ -72,7 +72,7 @@ func (trs *TaskRunSpec) ConvertTo(ctx context.Context, sink *v1.TaskRunSpec) err } if trs.TaskSpec != nil { sink.TaskSpec = &v1.TaskSpec{} - err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec) + err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec, meta, meta.Name) if err != nil { return err } @@ -115,17 +115,17 @@ func (tr *TaskRun) ConvertFrom(ctx context.Context, from apis.Convertible) error if err := deserializeTaskRunCloudEvents(&tr.ObjectMeta, &tr.Status); err != nil { return err } - if err := tr.Status.ConvertFrom(ctx, source.Status); err != nil { + if err := tr.Status.ConvertFrom(ctx, source.Status, &tr.ObjectMeta); err != nil { return err } - return tr.Spec.ConvertFrom(ctx, &source.Spec) + return tr.Spec.ConvertFrom(ctx, &source.Spec, &tr.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", tr) } } // ConvertFrom implements apis.Convertible -func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1.TaskRunSpec) error { +func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1.TaskRunSpec, meta *metav1.ObjectMeta) error { if source.Debug != nil { newDebug := TaskRunDebug{} newDebug.convertFrom(ctx, *source.Debug) @@ -134,18 +134,18 @@ func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1.TaskRunSpec) trs.Params = nil for _, p := range source.Params { new := Param{} - new.convertFrom(ctx, p) + new.ConvertFrom(ctx, p) trs.Params = append(trs.Params, new) } trs.ServiceAccountName = source.ServiceAccountName if source.TaskRef != nil { newTaskRef := TaskRef{} - newTaskRef.convertFrom(ctx, *source.TaskRef) + newTaskRef.ConvertFrom(ctx, *source.TaskRef) trs.TaskRef = &newTaskRef } if source.TaskSpec != nil { newTaskSpec := TaskSpec{} - err := newTaskSpec.ConvertFrom(ctx, source.TaskSpec) + err := newTaskSpec.ConvertFrom(ctx, source.TaskSpec, meta, meta.Name) if err != nil { return err } @@ -159,7 +159,7 @@ func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1.TaskRunSpec) trs.Workspaces = nil for _, w := range source.Workspaces { new := WorkspaceBinding{} - new.convertFrom(ctx, w) + new.ConvertFrom(ctx, w) trs.Workspaces = append(trs.Workspaces, new) } trs.StepOverrides = nil @@ -207,7 +207,7 @@ func (trso *TaskRunSidecarOverride) convertFrom(ctx context.Context, source v1.T } // ConvertTo implements apis.Convertible -func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus) error { +func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus, meta *metav1.ObjectMeta) error { sink.Status = trs.Status sink.PodName = trs.PodName sink.StartTime = trs.StartTime @@ -221,7 +221,7 @@ func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus) sink.RetriesStatus = nil for _, rr := range trs.RetriesStatus { new := v1.TaskRunStatus{} - err := rr.ConvertTo(ctx, &new) + err := rr.ConvertTo(ctx, &new, meta) if err != nil { return err } @@ -242,7 +242,7 @@ func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus) if trs.TaskSpec != nil { sink.TaskSpec 
= &v1.TaskSpec{} - err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec) + err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec, meta, meta.Name) if err != nil { return err } @@ -256,7 +256,7 @@ func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus) } // ConvertFrom implements apis.Convertible -func (trs *TaskRunStatus) ConvertFrom(ctx context.Context, source v1.TaskRunStatus) error { +func (trs *TaskRunStatus) ConvertFrom(ctx context.Context, source v1.TaskRunStatus, meta *metav1.ObjectMeta) error { trs.Status = source.Status trs.PodName = source.PodName trs.StartTime = source.StartTime @@ -270,7 +270,7 @@ func (trs *TaskRunStatus) ConvertFrom(ctx context.Context, source v1.TaskRunStat trs.RetriesStatus = nil for _, rr := range source.RetriesStatus { new := TaskRunStatus{} - err := new.ConvertFrom(ctx, rr) + err := new.ConvertFrom(ctx, rr, meta) if err != nil { return err } @@ -291,7 +291,7 @@ func (trs *TaskRunStatus) ConvertFrom(ctx context.Context, source v1.TaskRunStat if source.TaskSpec != nil { trs.TaskSpec = &TaskSpec{} - err := trs.TaskSpec.ConvertFrom(ctx, source.TaskSpec) + err := trs.TaskSpec.ConvertFrom(ctx, source.TaskSpec, meta, meta.Name) if err != nil { return err } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go index 61f3285d..2e4896d4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go @@ -24,6 +24,7 @@ import ( pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" + "knative.dev/pkg/kmap" ) var _ apis.Defaultable = (*TaskRun)(nil) @@ -36,6 +37,13 @@ func (tr *TaskRun) SetDefaults(ctx context.Context) { ctx = apis.WithinParent(ctx, tr.ObjectMeta) tr.Spec.SetDefaults(ctx) + // Silently filtering out Tekton Reserved annotations at creation + if apis.IsInCreate(ctx) { + tr.ObjectMeta.Annotations = kmap.Filter(tr.ObjectMeta.Annotations, func(s string) bool { + return filterReservedAnnotationRegexp.MatchString(s) + }) + } + // If the TaskRun doesn't have a managed-by label, apply the default // specified in the config. cfg := config.FromContextOrDefaults(ctx) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go index 27bb5143..134e1c81 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -61,7 +61,6 @@ type TaskRunSpec struct { // +optional Retries int `json:"retries,omitempty"` // Time after which one retry attempt times out. Defaults to 1 hour. - // Specified build timeout should be less than 24h. // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -389,12 +388,13 @@ type CloudEventDeliveryState struct { // +genclient // +genreconciler:krshapedlogic=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true // TaskRun represents a single execution of a Task. TaskRuns are how the steps // specified in a Task are executed; they specify the parameters and resources // used to run the steps in a Task. 
// -// +k8s:openapi-gen=true +// Deprecated: Please use v1.TaskRun instead. type TaskRun struct { metav1.TypeMeta `json:",inline"` // +optional @@ -450,11 +450,16 @@ func (tr *TaskRun) HasStarted() bool { return tr.Status.StartTime != nil && !tr.Status.StartTime.IsZero() } -// IsSuccessful returns true if the TaskRun's status indicates that it is done. +// IsSuccessful returns true if the TaskRun's status indicates that it has succeeded. func (tr *TaskRun) IsSuccessful() bool { return tr != nil && tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() } +// IsFailure returns true if the TaskRun's status indicates that it has failed. +func (tr *TaskRun) IsFailure() bool { + return tr != nil && tr.Status.GetCondition(apis.ConditionSucceeded).IsFalse() +} + // IsCancelled returns true if the TaskRun's spec status is set to Cancelled state func (tr *TaskRun) IsCancelled() bool { return tr.Spec.Status == TaskRunSpecStatusCancelled diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go index ef414612..300fd4aa 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go @@ -62,8 +62,6 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { } // Validate TaskSpec if it's present. if ts.TaskSpec != nil { - // skip validation of parameter and workspaces variables since we validate them via taskrunspec below. - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true) errs = errs.Also(ts.TaskSpec.Validate(ctx).ViaField("taskSpec")) } @@ -143,7 +141,8 @@ func (ts *TaskRunSpec) validateInlineParameters(ctx context.Context) (errs *apis } if ts.TaskSpec != nil && ts.TaskSpec.Steps != nil { errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) - errs = errs.Also(ValidateParameterVariables(config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), ts.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateParameterVariables(ctx, ts.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, ts.TaskSpec.Steps, paramSpec)) } return errs } @@ -243,11 +242,6 @@ func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs func ValidateParameters(ctx context.Context, params Params) (errs *apis.FieldError) { var names []string for _, p := range params { - if p.Value.Type == ParamTypeObject { - // Object type parameter is a beta feature and will fail validation if it's used in a taskrun spec - // when the enable-api-fields feature gate is not "alpha" or "beta". 
- errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) - } names = append(names, p.Name) } return errs.Also(validateNoDuplicateNames(names, false)) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go index ac53da80..e98eff14 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go @@ -102,9 +102,9 @@ func (wes WhenExpressions) AllowsExecution() bool { return true } -// ReplaceWhenExpressionsVariables interpolates variables, such as Parameters and Results, in +// ReplaceVariables interpolates variables, such as Parameters and Results, in // the Input and Values. -func (wes WhenExpressions) ReplaceWhenExpressionsVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions { +func (wes WhenExpressions) ReplaceVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions { replaced := wes for i := range wes { replaced[i] = wes[i].applyReplacements(replacements, arrayReplacements) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go index f7daa5cf..e1f33a7f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go @@ -84,7 +84,8 @@ func (w WorkspaceBinding) convertTo(ctx context.Context, sink *v1.WorkspaceBindi sink.CSI = w.CSI } -func (w *WorkspaceBinding) convertFrom(ctx context.Context, source v1.WorkspaceBinding) { +// ConvertFrom converts v1beta1 Param from v1 Param +func (w *WorkspaceBinding) ConvertFrom(ctx context.Context, source v1.WorkspaceBinding) { w.Name = source.Name w.SubPath = source.SubPath w.VolumeClaimTemplate = source.VolumeClaimTemplate diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go index 1d509cea..35a691cb 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go @@ -41,7 +41,7 @@ func SerializeToMetadata(meta *metav1.ObjectMeta, field interface{}, key string) // deserializes it into "to", and removes the key from the metadata's annotations. // Returns nil if the key is not present in the annotations. 
func DeserializeFromMetadata(meta *metav1.ObjectMeta, to interface{}, key string) error { - if meta.Annotations == nil { + if meta == nil || meta.Annotations == nil { return nil } if str, ok := meta.Annotations[key]; ok { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go index cb93bc8e..69b2c0d8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go @@ -48,24 +48,12 @@ var paramIndexingRegex = regexp.MustCompile(paramIndexing) // intIndexRegex will match all `[int]` for param expression var intIndexRegex = regexp.MustCompile(intIndex) -// ValidateVariable makes sure all variables in the provided string are known -func ValidateVariable(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := ExtractVariablesFromString(value, prefix); present { - for _, v := range vs { - v = strings.TrimSuffix(v, "[*]") - if !vars.Has(v) { - return &apis.FieldError{ - Message: fmt.Sprintf("non-existent variable in %q for %s %s", value, locationName, name), - Paths: []string{path + "." + name}, - } - } - } - } - return nil -} - -// ValidateVariableP makes sure all variables for a parameter in the provided string are known -func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError { +// ValidateNoReferencesToUnknownVariables returns an error if the input string contains references to unknown variables +// Inputs: +// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)" +// - prefix: the prefix of the substitutable variable, e.g. "params" or "context.pipeline" +// - vars: names of known variables +func ValidateNoReferencesToUnknownVariables(value, prefix string, vars sets.String) *apis.FieldError { if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ @@ -87,24 +75,14 @@ func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError return nil } -// ValidateVariableProhibited verifies that variables matching the relevant string expressions do not reference any of the names present in vars. -func ValidateVariableProhibited(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := ExtractVariablesFromString(value, prefix); present { - for _, v := range vs { - v = strings.TrimSuffix(v, "[*]") - if vars.Has(v) { - return &apis.FieldError{ - Message: fmt.Sprintf("variable type invalid in %q for %s %s", value, locationName, name), - Paths: []string{path + "." + name}, - } - } - } - } - return nil -} - -// ValidateVariableProhibitedP verifies that variables for a parameter matching the relevant string expressions do not reference any of the names present in vars. -func ValidateVariableProhibitedP(value, prefix string, vars sets.String) *apis.FieldError { +// ValidateNoReferencesToProhibitedVariables returns an error if the input string contains any references to any variables in vars, +// except for array indexing references. +// +// Inputs: +// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)" +// - prefix: the prefix of the substitutable variable, e.g. 
"params" or "context.pipeline" +// - vars: names of known variables +func ValidateNoReferencesToProhibitedVariables(value, prefix string, vars sets.String) *apis.FieldError { if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ @@ -126,14 +104,20 @@ func ValidateVariableProhibitedP(value, prefix string, vars sets.String) *apis.F return nil } -// ValidateEntireVariableProhibitedP verifies that values of object type are not used as whole. -func ValidateEntireVariableProhibitedP(value, prefix string, vars sets.String) *apis.FieldError { +// ValidateNoReferencesToEntireProhibitedVariables returns an error if the input string contains any whole array/object references +// to any variables in vars. References to array indexes or object keys are permitted. +// +// Inputs: +// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)" +// - prefix: the prefix of the substitutable variable, e.g. "params" or "context.pipeline" +// - vars: names of known variables +func ValidateNoReferencesToEntireProhibitedVariables(value, prefix string, vars sets.String) *apis.FieldError { + paths := []string{""} // Empty path is required to make the `ViaField`, … work vs, err := extractEntireVariablesFromString(value, prefix) if err != nil { return &apis.FieldError{ Message: fmt.Sprintf("extractEntireVariablesFromString failed : %v", err), - // Empty path is required to make the `ViaField`, … work - Paths: []string{""}, + Paths: paths, } } @@ -142,8 +126,7 @@ func ValidateEntireVariableProhibitedP(value, prefix string, vars sets.String) * if vars.Has(v) { return &apis.FieldError{ Message: fmt.Sprintf("variable type invalid in %q", value), - // Empty path is required to make the `ViaField`, … work - Paths: []string{""}, + Paths: paths, } } } @@ -151,43 +134,35 @@ func ValidateEntireVariableProhibitedP(value, prefix string, vars sets.String) * return nil } -// ValidateVariableIsolated verifies that variables matching the relevant string expressions are completely isolated if present. -func ValidateVariableIsolated(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := ExtractVariablesFromString(value, prefix); present { - firstMatch, _ := extractExpressionFromString(value, prefix) - for _, v := range vs { - v = strings.TrimSuffix(v, "[*]") - if vars.Has(v) { - if len(value) != len(firstMatch) { - return &apis.FieldError{ - Message: fmt.Sprintf("variable is not properly isolated in %q for %s %s", value, locationName, name), - Paths: []string{path + "." + name}, - } - } - } - } - } - return nil -} - -// ValidateVariableIsolatedP verifies that variables matching the relevant string expressions are completely isolated if present. -func ValidateVariableIsolatedP(value, prefix string, vars sets.String) *apis.FieldError { +// ValidateVariableReferenceIsIsolated returns an error if the input string contains characters in addition to references to known parameters. +// For example, if "foo" is a known parameter, a value of "foo: $(params.foo)" returns an error, but a value of "$(params.foo)" does not. +// Inputs: +// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)" +// - prefix: the prefix of the substitutable variable, e.g. 
"params" or "context.pipeline" +// - vars: names of known variables +func ValidateVariableReferenceIsIsolated(value, prefix string, vars sets.String) *apis.FieldError { + paths := []string{""} // Empty path is required to make the `ViaField`, … work if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ Message: errString, - Paths: []string{""}, + Paths: paths, + } + } + firstMatch, err := extractExpressionFromString(value, prefix) + if err != nil { + return &apis.FieldError{ + Message: err.Error(), + Paths: paths, } } - firstMatch, _ := extractExpressionFromString(value, prefix) for _, v := range vs { v = strings.TrimSuffix(v, "[*]") if vars.Has(v) { if len(value) != len(firstMatch) { return &apis.FieldError{ Message: fmt.Sprintf("variable is not properly isolated in %q", value), - // Empty path is required to make the `ViaField`, … work - Paths: []string{""}, + Paths: paths, } } } @@ -213,31 +188,37 @@ func ValidateWholeArrayOrObjectRefInStringVariable(name, value, prefix string, v } if isolatedVariableRegex.MatchString(value) { - return true, ValidateVariableP(value, prefix, vars).ViaFieldKey(prefix, name) + return true, ValidateNoReferencesToUnknownVariables(value, prefix, vars).ViaFieldKey(prefix, name) } return false, nil } -// extract a the first full string expressions found (e.g "$(input.params.foo)"). Return -// "" and false if nothing is found. -func extractExpressionFromString(s, prefix string) (string, bool) { +// extract a the first full string expressions found (e.g "$(input.params.foo)"). +// Returns "" if nothing is found. +func extractExpressionFromString(s, prefix string) (string, error) { pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) - re := regexp.MustCompile(pattern) + re, err := regexp.Compile(pattern) + if err != nil { + return "", err + } match := re.FindStringSubmatch(s) if match == nil { - return "", false + return "", nil } - return match[0], true + return match[0], nil } // ExtractVariablesFromString extracts variables from an input string s with the given prefix via regex matching. // It returns a slice of strings which contains the extracted variables, a bool flag to indicate if matches were found // and the error string if the referencing of parameters is invalid. -// If the string does not contain the input prefix then the output will contain an empty slice of strings. +// If the string does not contain the input prefix then the output will contain a slice of strings with length 0. func ExtractVariablesFromString(s, prefix string) ([]string, bool, string) { pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) - re := regexp.MustCompile(pattern) + re, err := regexp.Compile(pattern) + if err != nil { + return nil, false, "" + } matches := re.FindAllStringSubmatch(s, -1) errString := "" // Input string does not contain the prefix and therefore not matches are found. 
@@ -272,11 +253,12 @@ func ExtractVariablesFromString(s, prefix string) ([]string, bool, string) { return vars, true, errString } +// extractEntireVariablesFromString returns any references to entire array or object params in s with the given prefix func extractEntireVariablesFromString(s, prefix string) ([]string, error) { pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) re, err := regexp.Compile(pattern) if err != nil { - return nil, fmt.Errorf("Fail to parse regex pattern: %w", err) + return nil, fmt.Errorf("failed to parse regex pattern: %w", err) } matches := re.FindAllStringSubmatch(s, -1) @@ -308,7 +290,10 @@ func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string { return groups } -// ApplyReplacements applies string replacements +// ApplyReplacements returns a string with references to parameters replaced, +// based on the mapping provided in replacements. +// For example, if the input string is "foo: $(params.foo)", and replacements maps "params.foo" to "bar", +// the output would be "foo: bar". func ApplyReplacements(in string, replacements map[string]string) string { replacementsList := []string{} for k, v := range replacements { diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml index fbc6df79..2346df13 100644 --- a/vendor/go.uber.org/zap/.golangci.yml +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -17,7 +17,7 @@ linters: - unused # Our own extras: - - gofmt + - gofumpt - nolintlint # lints nolint directives - revive diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index 92aa65d6..4fea3027 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -1,7 +1,15 @@ # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +
+ Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -92,7 +100,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 11b46597..6d6cd5f4 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,14 +3,30 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.27.0 (20 Feb 2024) +Enhancements: +* [#1378][]: Add `WithLazy` method for `SugaredLogger`. +* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`. +* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`. +* [#1416][]: Add `WithPanicHook` option for testing panic logs. + +Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release. + +[#1378]: https://github.com/uber-go/zap/pull/1378 +[#1399]: https://github.com/uber-go/zap/pull/1399 +[#1406]: https://github.com/uber-go/zap/pull/1406 +[#1416]: https://github.com/uber-go/zap/pull/1416 + ## 1.26.0 (14 Sep 2023) Enhancements: +* [#1297][]: Add Dict as a Field. * [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured context. * [#1350][]: String encoding is much (~50%) faster now. -Thanks to @jquirke, @cdvr1993 for their contributions to this release. +Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release. +[#1297]: https://github.com/uber-go/zap/pull/1297 [#1319]: https://github.com/uber-go/zap/pull/1319 [#1350]: https://github.com/uber-go/zap/pull/1350 @@ -25,7 +41,7 @@ Enhancements: * [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. * [#1281][]: Add `zap/exp/expfield` package which contains helper methods `Str` and `Strs` for constructing String-like zap.Fields. -* [#1310][]: Reduce stack size on `Any`. +* [#1310][]: Reduce stack size on `Any`. Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions to this release. @@ -352,7 +368,7 @@ to this release. [#675]: https://github.com/uber-go/zap/pull/675 [#704]: https://github.com/uber-go/zap/pull/704 -## v1.9.1 (06 Aug 2018) +## 1.9.1 (06 Aug 2018) Bugfixes: @@ -360,7 +376,7 @@ Bugfixes: [#614]: https://github.com/uber-go/zap/pull/614 -## v1.9.0 (19 Jul 2018) +## 1.9.0 (19 Jul 2018) Enhancements: * [#602][]: Reduce number of allocations when logging with reflection. @@ -373,7 +389,7 @@ Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and [#572]: https://github.com/uber-go/zap/pull/572 [#606]: https://github.com/uber-go/zap/pull/606 -## v1.8.0 (13 Apr 2018) +## 1.8.0 (13 Apr 2018) Enhancements: * [#508][]: Make log level configurable when redirecting the standard @@ -391,14 +407,14 @@ Thanks to @DiSiqueira and @djui for their contributions to this release. [#577]: https://github.com/uber-go/zap/pull/577 [#574]: https://github.com/uber-go/zap/pull/574 -## v1.7.1 (25 Sep 2017) +## 1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. 
[#504]: https://github.com/uber-go/zap/pull/504 -## v1.7.0 (21 Sep 2017) +## 1.7.0 (21 Sep 2017) Enhancements: @@ -407,7 +423,7 @@ Enhancements: [#487]: https://github.com/uber-go/zap/pull/487 -## v1.6.0 (30 Aug 2017) +## 1.6.0 (30 Aug 2017) Enhancements: @@ -418,7 +434,7 @@ Enhancements: [#490]: https://github.com/uber-go/zap/pull/490 [#491]: https://github.com/uber-go/zap/pull/491 -## v1.5.0 (22 Jul 2017) +## 1.5.0 (22 Jul 2017) Enhancements: @@ -436,7 +452,7 @@ Thanks to @richard-tunein and @pavius for their contributions to this release. [#460]: https://github.com/uber-go/zap/pull/460 [#470]: https://github.com/uber-go/zap/pull/470 -## v1.4.1 (08 Jun 2017) +## 1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -448,7 +464,7 @@ Bugfixes: [#435]: https://github.com/uber-go/zap/pull/435 [#444]: https://github.com/uber-go/zap/pull/444 -## v1.4.0 (12 May 2017) +## 1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -464,7 +480,7 @@ Enhancements: [#425]: https://github.com/uber-go/zap/pull/425 [#431]: https://github.com/uber-go/zap/pull/431 -## v1.3.0 (25 Apr 2017) +## 1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the ability to marshal an AtomicLevel. It is fully backward-compatible. @@ -478,7 +494,7 @@ Enhancements: [#415]: https://github.com/uber-go/zap/pull/415 [#416]: https://github.com/uber-go/zap/pull/416 -## v1.2.0 (13 Apr 2017) +## 1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -489,7 +505,7 @@ Enhancements: [#402]: https://github.com/uber-go/zap/pull/402 -## v1.1.0 (31 Mar 2017) +## 1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. It is fully backward-compatible. @@ -510,7 +526,7 @@ Thanks to @moitias for contributing to this release. [#396]: https://github.com/uber-go/zap/pull/396 [#386]: https://github.com/uber-go/zap/pull/386 -## v1.0.0 (14 Mar 2017) +## 1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no further breaking changes will be made in the 1.x release series. Anyone using a @@ -569,7 +585,7 @@ contributions to this release. [#365]: https://github.com/uber-go/zap/pull/365 [#372]: https://github.com/uber-go/zap/pull/372 -## v1.0.0-rc.3 (7 Mar 2017) +## 1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no breaking changes. @@ -595,7 +611,7 @@ Thanks to @ansel1 and @suyash for their contributions to this release. [#353]: https://github.com/uber-go/zap/pull/353 [#311]: https://github.com/uber-go/zap/pull/311 -## v1.0.0-rc.2 (21 Feb 2017) +## 1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two breaking changes. @@ -641,7 +657,7 @@ Thanks to @skipor and @chapsuk for their contributions to this release. [#326]: https://github.com/uber-go/zap/pull/326 [#300]: https://github.com/uber-go/zap/pull/300 -## v1.0.0-rc.1 (14 Feb 2017) +## 1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple breaking changes and improvements from the pre-release version. Most notably: @@ -661,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably: * Sampling is more accurate, and doesn't depend on the standard library's shared timer heap. 
-## v0.1.0-beta.1 (6 Feb 2017) +## 0.1.0-beta.1 (6 Feb 2017) This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and upgrade at their leisure. Since this is the first tagged release, there are no diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE similarity index 100% rename from vendor/go.uber.org/zap/LICENSE.txt rename to vendor/go.uber.org/zap/LICENSE diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index 9de08927..a17035cb 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -1,7 +1,16 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +# :zap: zap + + +
Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -66,41 +75,44 @@ Log a message and 10 fields: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 1744 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op -| zerolog | 918 ns/op | -47% | 1 allocs/op -| go-kit | 5590 ns/op | +221% | 57 allocs/op -| slog | 5640 ns/op | +223% | 40 allocs/op -| apex/log | 21184 ns/op | +1115% | 63 allocs/op -| logrus | 24338 ns/op | +1296% | 79 allocs/op -| log15 | 26054 ns/op | +1394% | 74 allocs/op +| :zap: zap | 656 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op +| zerolog | 380 ns/op | -42% | 1 allocs/op +| go-kit | 2249 ns/op | +243% | 57 allocs/op +| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op +| slog | 2481 ns/op | +278% | 42 allocs/op +| apex/log | 9591 ns/op | +1362% | 63 allocs/op +| log15 | 11393 ns/op | +1637% | 75 allocs/op +| logrus | 11654 ns/op | +1677% | 79 allocs/op Log a message with a logger that already has 10 fields of context: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 193 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op -| zerolog | 81 ns/op | -58% | 0 allocs/op -| slog | 322 ns/op | +67% | 0 allocs/op -| go-kit | 5377 ns/op | +2686% | 56 allocs/op -| apex/log | 19518 ns/op | +10013% | 53 allocs/op -| log15 | 19812 ns/op | +10165% | 70 allocs/op -| logrus | 21997 ns/op | +11297% | 68 allocs/op +| :zap: zap | 67 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op +| zerolog | 35 ns/op | -48% | 0 allocs/op +| slog | 193 ns/op | +188% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op +| go-kit | 2460 ns/op | +3572% | 56 allocs/op +| log15 | 9038 ns/op | +13390% | 70 allocs/op +| apex/log | 9068 ns/op | +13434% | 53 allocs/op +| logrus | 10521 ns/op | +15603% | 68 allocs/op Log a static string, without any context or `printf`-style templating: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 165 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op -| zerolog | 95 ns/op | -42% | 0 allocs/op -| slog | 296 ns/op | +79% | 0 allocs/op -| go-kit | 415 ns/op | +152% | 9 allocs/op -| standard library | 422 ns/op | +156% | 2 allocs/op -| apex/log | 1601 ns/op | +870% | 5 allocs/op -| logrus | 3017 ns/op | +1728% | 23 allocs/op -| log15 | 3469 ns/op | +2002% | 20 allocs/op +| :zap: zap | 63 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op +| zerolog | 32 ns/op | -49% | 0 allocs/op +| standard library | 124 ns/op | +97% | 1 allocs/op +| slog | 196 ns/op | +211% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op +| go-kit | 213 ns/op | +238% | 9 allocs/op +| apex/log | 771 ns/op | +1124% | 5 allocs/op +| logrus | 1439 ns/op | +2184% | 23 allocs/op +| log15 | 2069 ns/op | +3184% | 20 allocs/op ## Development Status: Stable @@ -120,7 +132,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 27fb5cd5..0b8540c2 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -42,7 +42,7 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } -// AppendBytes writes a single byte to the Buffer. +// AppendBytes writes the given slice of bytes to the Buffer. func (b *Buffer) AppendBytes(v []byte) { b.bs = append(b.bs, v...) } diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index c8dd3358..6743930b 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -460,6 +460,8 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { // - https://github.com/uber-go/zap/pull/1304 // - https://github.com/uber-go/zap/pull/1305 // - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. type anyFieldC[T any] func(string, T) Field func (f anyFieldC[T]) Any(key string, val any) Field { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index 6205fe48..c4d30032 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -43,6 +43,7 @@ type Logger struct { development bool addCaller bool + onPanic zapcore.CheckWriteHook // default is WriteThenPanic onFatal zapcore.CheckWriteHook // default is WriteThenFatal name string @@ -345,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) case zapcore.FatalLevel: - onFatal := log.onFatal - // nil or WriteThenNoop will lead to continued execution after - // a Fatal log entry, which is unexpected. For example, - // - // f, err := os.Open(..) - // if err != nil { - // log.Fatal("cannot open", zap.Error(err)) - // } - // fmt.Println(f.Name()) - // - // The f.Name() will panic if we continue execution after the - // log.Fatal. - if onFatal == nil || onFatal == zapcore.WriteThenNoop { - onFatal = zapcore.WriteThenFatal - } - ce = ce.After(ent, onFatal) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) case zapcore.DPanicLevel: if log.development { - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) } } @@ -430,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the log.Fatal. 
+ if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index c4f3bca3..43d357ac 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { }) } +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + // OnFatal sets the action to take on fatal logs. // // Deprecated: Use [WithFatalHook] instead. diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 00ac5fe3..8904cd08 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. +func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + // Level reports the minimum enabled level for this logger. // // For NopLoggers, this is [zapcore.InvalidLevel]. @@ -122,6 +137,12 @@ func (s *SugaredLogger) Level() zapcore.Level { return zapcore.LevelOf(s.base.core) } +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(lvl, "", args, nil) +} + // Debug logs the provided arguments at [DebugLevel]. // Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { @@ -165,6 +186,12 @@ func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(lvl, template, args, nil) +} + // Debugf formats the message according to the format specifier // and logs it at [DebugLevel]. 
func (s *SugaredLogger) Debugf(template string, args ...interface{}) { @@ -208,6 +235,12 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(lvl, msg, nil, keysAndValues) +} + // Debugw logs a message with some additional context. The variadic key-value // pairs are treated as they are in With. // @@ -255,6 +288,12 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } +// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(lvl, args, nil) +} + // Debugln logs a message at [DebugLevel]. // Spaces are always added between arguments. func (s *SugaredLogger) Debugln(args ...interface{}) { diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index 8ca0bfaf..cc2b4e07 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // If this ever becomes a performance bottleneck, we can implement // ArrayEncoder for our plain-text format. arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { c.EncodeTime(ent.Time, arr) } if c.LevelKey != "" && c.EncodeLevel != nil { diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index 5769ff3e..04462541 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -37,6 +37,9 @@ const DefaultLineEnding = "\n" const OmitKey = "" // A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type LevelEncoder func(Level, PrimitiveArrayEncoder) // LowercaseLevelEncoder serializes a Level to a lowercase string. For example, @@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error { } // A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type TimeEncoder func(time.Time, PrimitiveArrayEncoder) // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds @@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error { } // A DurationEncoder serializes a time.Duration to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. @@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { } // A CallerEncoder serializes an EntryCaller to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. 
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) // FullCallerEncoder serializes a caller in /full/path/to/package/file:line @@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error { // A NameEncoder serializes a period-separated logger name to a primitive // type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type NameEncoder func(string, PrimitiveArrayEncoder) // FullNameEncoder serializes the logger name as-is. diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 95bdb0a1..308c9781 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -47,7 +47,7 @@ const ( ByteStringType // Complex128Type indicates that the field carries a complex128. Complex128Type - // Complex64Type indicates that the field carries a complex128. + // Complex64Type indicates that the field carries a complex64. Complex64Type // DurationType indicates that the field carries a time.Duration. DurationType diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index c8ab8697..9685169b 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -372,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.Level.String()) } } - if final.TimeKey != "" { + if final.TimeKey != "" && !ent.Time.IsZero() { final.AddTime(final.TimeKey, ent.Time) } if ent.LoggerName != "" && final.NameKey != "" { diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index de67f938..3c57880d 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() { return } switch c { - case ' ', '\n', '\r', '\t', '\f', '/': - z.pendingAttr[0].end = z.raw.end - 1 - return case '=': if z.pendingAttr[0].start+1 == z.raw.end { // WHATWG 13.2.5.32, if we see an equals sign before the attribute name @@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() { continue } fallthrough - case '>': + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // WHATWG 13.2.5.33 Attribute name state + // We need to reconsume the char in the after attribute name state to support the / character z.raw.end-- z.pendingAttr[0].end = z.raw.end return @@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() { if z.err != nil { return } + if c == '/' { + // WHATWG 13.2.5.34 After attribute name state + // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. + return + } if c != '=' { z.raw.end-- return diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90d..43557ab7 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). 
+ return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and @@ -1565,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1577,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. + // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { return nil, ConnectionError(ErrCodeCompression) } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984f..3b9f06b9 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ae94c640..ce2e8b40 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. 
IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -924,7 +925,7 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } @@ -1637,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -2017,7 +2018,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } @@ -2038,7 +2039,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go new file mode 100644 index 00000000..61075bd1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/testsync.go @@ -0,0 +1,331 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import ( + "context" + "sync" + "time" +) + +// testSyncHooks coordinates goroutines in tests. +// +// For example, a call to ClientConn.RoundTrip involves several goroutines, including: +// - the goroutine running RoundTrip; +// - the clientStream.doRequest goroutine, which writes the request; and +// - the clientStream.readLoop goroutine, which reads the response. +// +// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines +// are blocked waiting for some condition such as reading the Request.Body or waiting for +// flow control to become available. +// +// The testSyncHooks also manage timers and synthetic time in tests. +// This permits us to, for example, start a request and cause it to time out waiting for +// response headers without resorting to time.Sleep calls. +type testSyncHooks struct { + // active/inactive act as a mutex and condition variable. + // + // - neither chan contains a value: testSyncHooks is locked. 
+ // - active contains a value: unlocked, and at least one goroutine is not blocked + // - inactive contains a value: unlocked, and all goroutines are blocked + active chan struct{} + inactive chan struct{} + + // goroutine counts + total int // total goroutines + condwait map[*sync.Cond]int // blocked in sync.Cond.Wait + blocked []*testBlockedGoroutine // otherwise blocked + + // fake time + now time.Time + timers []*fakeTimer + + // Transport testing: Report various events. + newclientconn func(*ClientConn) + newstream func(*clientStream) +} + +// testBlockedGoroutine is a blocked goroutine. +type testBlockedGoroutine struct { + f func() bool // blocked until f returns true + ch chan struct{} // closed when unblocked +} + +func newTestSyncHooks() *testSyncHooks { + h := &testSyncHooks{ + active: make(chan struct{}, 1), + inactive: make(chan struct{}, 1), + condwait: map[*sync.Cond]int{}, + } + h.inactive <- struct{}{} + h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + return h +} + +// lock acquires the testSyncHooks mutex. +func (h *testSyncHooks) lock() { + select { + case <-h.active: + case <-h.inactive: + } +} + +// waitInactive waits for all goroutines to become inactive. +func (h *testSyncHooks) waitInactive() { + for { + <-h.inactive + if !h.unlock() { + break + } + } +} + +// unlock releases the testSyncHooks mutex. +// It reports whether any goroutines are active. +func (h *testSyncHooks) unlock() (active bool) { + // Look for a blocked goroutine which can be unblocked. + blocked := h.blocked[:0] + unblocked := false + for _, b := range h.blocked { + if !unblocked && b.f() { + unblocked = true + close(b.ch) + } else { + blocked = append(blocked, b) + } + } + h.blocked = blocked + + // Count goroutines blocked on condition variables. + condwait := 0 + for _, count := range h.condwait { + condwait += count + } + + if h.total > condwait+len(blocked) { + h.active <- struct{}{} + return true + } else { + h.inactive <- struct{}{} + return false + } +} + +// goRun starts a new goroutine. +func (h *testSyncHooks) goRun(f func()) { + h.lock() + h.total++ + h.unlock() + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + f() + }() +} + +// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. +// It waits until f returns true before proceeding. +// +// Example usage: +// +// h.blockUntil(func() bool { +// // Is the context done yet? +// select { +// case <-ctx.Done(): +// default: +// return false +// } +// return true +// }) +// // Wait for the context to become done. +// <-ctx.Done() +// +// The function f passed to blockUntil must be non-blocking and idempotent. +func (h *testSyncHooks) blockUntil(f func() bool) { + if f() { + return + } + ch := make(chan struct{}) + h.lock() + h.blocked = append(h.blocked, &testBlockedGoroutine{ + f: f, + ch: ch, + }) + h.unlock() + <-ch +} + +// broadcast is sync.Cond.Broadcast. +func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { + h.lock() + delete(h.condwait, cond) + h.unlock() + cond.Broadcast() +} + +// broadcast is sync.Cond.Wait. +func (h *testSyncHooks) condWait(cond *sync.Cond) { + h.lock() + h.condwait[cond]++ + h.unlock() +} + +// newTimer creates a new fake timer. +func (h *testSyncHooks) newTimer(d time.Duration) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + c: make(chan time.Time), + } + h.timers = append(h.timers, t) + return t +} + +// afterFunc creates a new fake AfterFunc timer. 
+func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + f: f, + } + h.timers = append(h.timers, t) + return t +} + +func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + t := h.afterFunc(d, cancel) + return ctx, func() { + t.Stop() + cancel() + } +} + +func (h *testSyncHooks) timeUntilEvent() time.Duration { + h.lock() + defer h.unlock() + var next time.Time + for _, t := range h.timers { + if next.IsZero() || t.when.Before(next) { + next = t.when + } + } + if d := next.Sub(h.now); d > 0 { + return d + } + return 0 +} + +// advance advances time and causes synthetic timers to fire. +func (h *testSyncHooks) advance(d time.Duration) { + h.lock() + defer h.unlock() + h.now = h.now.Add(d) + timers := h.timers[:0] + for _, t := range h.timers { + t := t // remove after go.mod depends on go1.22 + t.mu.Lock() + switch { + case t.when.After(h.now): + timers = append(timers, t) + case t.when.IsZero(): + // stopped timer + default: + t.when = time.Time{} + if t.c != nil { + close(t.c) + } + if t.f != nil { + h.total++ + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + t.f() + }() + } + } + t.mu.Unlock() + } + h.timers = timers +} + +// A timer wraps a time.Timer, or a synthetic equivalent in tests. +// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. +type timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +// timeTimer implements timer using real time. +type timeTimer struct { + t *time.Timer + c chan time.Time +} + +// newTimeTimer creates a new timer using real time. +func newTimeTimer(d time.Duration) timer { + ch := make(chan time.Time) + t := time.AfterFunc(d, func() { + close(ch) + }) + return &timeTimer{t, ch} +} + +// newTimeAfterFunc creates an AfterFunc timer using real time. +func newTimeAfterFunc(d time.Duration, f func()) timer { + return &timeTimer{ + t: time.AfterFunc(d, f), + } +} + +func (t timeTimer) C() <-chan time.Time { return t.c } +func (t timeTimer) Stop() bool { return t.t.Stop() } +func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } + +// fakeTimer implements timer using fake time. +type fakeTimer struct { + hooks *testSyncHooks + + mu sync.Mutex + when time.Time // when the timer will fire + c chan time.Time // closed when the timer fires; mutually exclusive with f + f func() // called when the timer fires; mutually exclusive with c +} + +func (t *fakeTimer) C() <-chan time.Time { return t.c } + +func (t *fakeTimer) Stop() bool { + t.mu.Lock() + defer t.mu.Unlock() + stopped := t.when.IsZero() + t.when = time.Time{} + return stopped +} + +func (t *fakeTimer) Reset(d time.Duration) bool { + if t.c != nil || t.f == nil { + panic("fakeTimer only supports Reset on AfterFunc timers") + } + t.mu.Lock() + defer t.mu.Unlock() + t.hooks.lock() + defer t.hooks.unlock() + active := !t.when.IsZero() + t.when = t.hooks.now.Add(d) + if !active { + t.hooks.timers = append(t.hooks.timers, t) + } + return active +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86..ce375c8c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,6 +147,12 @@ type Transport struct { // waiting for their turn. 
StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. // Note that a ping response will is considered a received frame, so if @@ -178,6 +184,8 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + syncHooks *testSyncHooks } func (t *Transport) maxHeaderListSize() uint32 { @@ -302,7 +310,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer + idleTimer timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -344,6 +352,60 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder + + syncHooks *testSyncHooks // can be nil +} + +// Hook points used for testing. +// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +// goRun starts a new goroutine. +func (cc *ClientConn) goRun(f func()) { + if cc.syncHooks != nil { + cc.syncHooks.goRun(f) + return + } + go f() +} + +// condBroadcast is cc.cond.Broadcast. +func (cc *ClientConn) condBroadcast() { + if cc.syncHooks != nil { + cc.syncHooks.condBroadcast(cc.cond) + } + cc.cond.Broadcast() +} + +// condWait is cc.cond.Wait. +func (cc *ClientConn) condWait() { + if cc.syncHooks != nil { + cc.syncHooks.condWait(cc.cond) + } + cc.cond.Wait() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (cc *ClientConn) newTimer(d time.Duration) timer { + if cc.syncHooks != nil { + return cc.syncHooks.newTimer(d) + } + return newTimeTimer(d) +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { + if cc.syncHooks != nil { + return cc.syncHooks.afterFunc(d, f) + } + return newTimeAfterFunc(d, f) +} + +func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if cc.syncHooks != nil { + return cc.syncHooks.contextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. 
- cs.cc.cond.Broadcast() + cs.cc.condBroadcast() } } @@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.cond.Broadcast() + cc.condBroadcast() } } @@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - go func() { + cs.cc.goRun(func() { cs.reqBody.Close() close(reqBodyClosed) - }() + }) } type stickyErrWriter struct { @@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + var tm timer + if t.syncHooks != nil { + tm = t.syncHooks.newTimer(d) + t.syncHooks.blockUntil(func() bool { + select { + case <-tm.C(): + case <-req.Context().Done(): + default: + return false + } + return true + }) + } else { + tm = newTimeTimer(d) + } select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +725,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.syncHooks != nil { + return t.newClientConn(nil, singleUse, t.syncHooks) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + syncHooks: hooks, + } + if hooks != nil { + hooks.newclientconn(cc) + c = cc.tconn } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) 
(*ClientConn, erro return nil, cc.werr } - go cc.readLoop() + cc.goRun(cc.readLoop) return cc, nil } @@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1056,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - go func() { + cc.goRun(func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1068,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.cond.Wait() + cc.condWait() } - }() + }) shutdownEnterWaitStateHook() select { case <-done: @@ -1080,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() return ctx.Err() } @@ -1118,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() cc.closeConn() } @@ -1215,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + cc.goRun(func() { + cs.doRequest(req) + }) waitDone := func() error { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.donec: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.donec: return nil @@ -1292,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return err } + if streamf != nil { + streamf(cs) + } + for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1348,6 +1458,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + var newStreamHook func(*clientStream) + if cc.syncHooks != nil { + newStreamHook = cc.syncHooks.newstream + cc.syncHooks.blockUntil(func() bool { + select { + case cc.reqHeaderMu <- struct{}{}: + <-cc.reqHeaderMu + case <-cs.reqCancel: + case <-ctx.Done(): + default: + return false + } + return true + }) + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1372,6 +1497,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() + if newStreamHook != nil { + newStreamHook(cs) + } + // TODO(bradfitz): this is a copy 
of the logic in net/http. Unify somewhere? if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && @@ -1452,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.peerClosed: + case <-respHeaderTimer: + case <-respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.peerClosed: return nil @@ -1609,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.cond.Wait() + cc.condWait() cc.pendingRequests-- select { case <-cs.abort: @@ -1871,8 +2015,24 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.cond.Wait() + cc.condWait() + } +} + +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } } + return "" } var errNilRequestURL = errors.New("http2: Request.URI is nil") @@ -1912,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. - return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2143,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. 
- cc.cond.Broadcast() + cc.condBroadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2231,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() } @@ -2266,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2867,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.cond.Broadcast() + cc.condBroadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -2911,9 +3065,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } - cc.cond.Broadcast() + cc.condBroadcast() return nil } @@ -2955,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) - go func() { + var pingError error + errc := make(chan struct{}) + cc.goRun(func() { cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } - }() + }) + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-c: + case <-errc: + case <-ctx.Done(): + case <-cc.readerDone: + default: + return false + } + return true + }) + } select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3141,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 00000000..e99c92f3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + 
errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. + VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. 
+func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. +func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. 
+ case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 58901bda..e83ddeef 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -18,6 +18,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" ) @@ -115,41 +116,60 @@ const ( AuthStyleInHeader AuthStyle = 2 ) -// authStyleCache is the set of tokenURLs we've successfully used via +// LazyAuthStyleCache is a backwards compatibility compromise to let Configs +// have a lazily-initialized AuthStyleCache. +// +// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, +// both would ideally just embed an unexported AuthStyleCache but because both +// were historically allowed to be copied by value we can't retroactively add an +// uncopyable Mutex to them. +// +// We could use an atomic.Pointer, but that was added recently enough (in Go +// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 +// still pass. By using an atomic.Value, it supports both Go 1.17 and +// copying by value, even if that's not ideal. +type LazyAuthStyleCache struct { + v atomic.Value // of *AuthStyleCache +} + +func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { + if c, ok := lc.v.Load().(*AuthStyleCache); ok { + return c + } + c := new(AuthStyleCache) + if !lc.v.CompareAndSwap(nil, c) { + c = lc.v.Load().(*AuthStyleCache) + } + return c +} + +// AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that // the set of OAuth2 servers a program contacts over time is fixed and // small. -var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. -func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +type AuthStyleCache struct { + mu sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] +func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + style, ok = c.m[tokenURL] return } // setAuthStyle adds an entry to authStyleCache, documented above. 
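// Editor's illustrative sketch (not part of the vendored x/oauth2 patch): how a
// client might drive the RFC 8628 device flow using the Config.DeviceAuth and
// Config.DeviceAccessToken methods added in deviceauth.go above. The client ID
// and endpoint URLs are placeholders.
package example

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func deviceFlowSketch(ctx context.Context) (*oauth2.Token, error) {
	cfg := &oauth2.Config{
		ClientID: "example-client-id", // placeholder
		Endpoint: oauth2.Endpoint{
			DeviceAuthURL: "https://auth.example.com/device/code",  // placeholder
			TokenURL:      "https://auth.example.com/oauth2/token", // placeholder
		},
	}

	// Request a device code and user code from the authorization server.
	da, err := cfg.DeviceAuth(ctx)
	if err != nil {
		return nil, err
	}

	// Show the user where to go and what to enter, then poll until the user
	// approves, the code expires, or ctx is cancelled.
	fmt.Printf("visit %s and enter code %s\n", da.VerificationURI, da.UserCode)
	return cfg.DeviceAccessToken(ctx, da)
}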
-func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[string]AuthStyle) } - authStyleCache.m[tokenURL] = v + c.m[tokenURL] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -189,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values { return v2 } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { needsAuthStyleProbe := authStyle == 0 if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -222,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 9085fabe..90a2c3d6 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -58,6 +58,10 @@ type Config struct { // Scope specifies optional requested permissions. Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // A TokenSource is anything that can return a token. @@ -71,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -139,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. 
+// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -162,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -207,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 00000000..50593b6d --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." + // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. +func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. 
+func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. +func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5ffce976..5bbb3321 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -164,7 +164,7 @@ func tokenFromInternal(t *internal.Token) *Token { // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*RetrieveError)(rErr) diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4b..b0e41985 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c6492020..fdcaa974 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -584,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 4b68e597..7f602ffd 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc6993..2f0fa76e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
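// Editor's illustrative sketch (not part of the vendored patch): pairing the
// PKCE helpers added in pkce.go above (GenerateVerifier, S256ChallengeOption,
// VerifierOption) with AuthCodeURL and Exchange. The Config, state, and code
// values are placeholders supplied by the caller.
package example

import (
	"context"

	"golang.org/x/oauth2"
)

// authURLWithPKCE generates a fresh verifier, returns the consent-page URL
// carrying the S256 challenge, and hands the verifier back to the caller so it
// can be kept (for example in the session) until the redirect callback arrives.
func authURLWithPKCE(cfg *oauth2.Config, state string) (url, verifier string) {
	verifier = oauth2.GenerateVerifier()
	url = cfg.AuthCodeURL(state, oauth2.S256ChallengeOption(verifier))
	return url, verifier
}

// exchangeWithPKCE presents the saved verifier when redeeming the code
// returned on the redirect callback.
func exchangeWithPKCE(ctx context.Context, cfg *oauth2.Config, code, verifier string) (*oauth2.Token, error) {
	return cfg.Exchange(ctx, code, oauth2.VerifierOption(verifier))
}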
-//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4d..2b57e0f7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e..5682e262 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. 
+// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// path is a non-empty path for specified key. +// atfd is a file descriptor at which to start lookup from or AT_FDCWD. +func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FsconfigSetPath but with AT_PATH_EMPTY implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is a file descriptor to be assigned to specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon.
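// Editor's illustrative sketch (not part of the vendored x/sys patch): using
// the fsconfig(2) wrappers added above together with the existing Fsopen and
// Fsmount helpers to configure and create a tmpfs superblock. Linux-only and
// needs CAP_SYS_ADMIN; the "size" parameter value is a placeholder.
package example

import "golang.org/x/sys/unix"

func mountTmpfsSketch() error {
	// Open a filesystem context for tmpfs.
	fsfd, err := unix.Fsopen("tmpfs", unix.FSOPEN_CLOEXEC)
	if err != nil {
		return err
	}
	defer unix.Close(fsfd)

	// Set a string parameter on the context, then create the superblock.
	if err := unix.FsconfigSetString(fsfd, "size", "16m"); err != nil {
		return err
	}
	if err := unix.FsconfigCreate(fsfd); err != nil {
		return err
	}

	// Turn the configured context into a detached mount object; it can later
	// be attached to the tree with MoveMount.
	mfd, err := unix.Fsmount(fsfd, unix.FSMOUNT_CLOEXEC, unix.MOUNT_ATTR_NODEV)
	if err != nil {
		return err
	}
	return unix.Close(mfd)
}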
+func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b473038c..27c41b6f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -1520,6 +1520,14 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index a5d3ff8d..36bf8399 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2465,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2669,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2913,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' 
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3075,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821c..42ff8c3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e411..dca43600 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c6398556..5cca668a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e2..d8cae6d1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 27ac4a09..28e39afd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 
SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642..cd66e92c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d7..c1595eba 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0..ee9456b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84..8cfca81e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c54f152d..60b0deb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc7..f90aa728 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e0c3725e..ba9e0150 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813e..07cdfd6e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4e..2f1dd214 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994d..f40519d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d271..87d8612a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbd..0cc3ce49 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f56dc250..856d92d6 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 974bf246..8d467094 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e..edc17324 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77..445eba20 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362e..adba01bc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4f..014c4e9c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca8..ccc97d74 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index a72e31d3..ec2b64a9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + 
SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e374..21a839e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c..c11121ec 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e59..909b631f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195..e49bed16 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943b..66017d2d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc5..47bab18d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399f..eff6bcde 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - 
Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -832,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1546,6 +1559,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1561,6 +1575,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1608,6 +1623,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1645,6 +1663,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 
+ IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1666,6 +1692,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1689,9 +1718,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1722,6 +1764,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1736,6 +1780,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1748,6 +1793,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1777,6 +1824,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1792,6 +1842,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1850,8 +1901,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1881,6 +1940,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( @@ -3399,7 +3463,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4247,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type 
LandlockPathBeneathAttr struct { @@ -5134,7 +5199,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 +5612,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad1925..d4577a42 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ffb8708c..6525c62f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } @@ -166,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -349,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess 
Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -1835,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. 
+const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 359780f6..d8cb71db 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3380,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e8791c82..5c6035dd 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -188,6 +188,8 @@ var ( procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -212,7 +214,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -236,6 +240,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -322,6 +328,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -335,6 +342,9 @@ var ( procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = 
modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,7 +352,6 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -351,6 +360,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -361,6 +371,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -379,6 +390,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -1641,6 +1653,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1845,6 +1873,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1857,6 +1893,14 @@ func 
DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2058,6 +2102,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2810,6 +2870,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2924,6 +2992,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2989,14 +3081,6 @@ func SetEndOfFile(handle Handle) (err error) { return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { @@ -3060,6 +3144,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func 
SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3145,6 +3237,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3291,6 +3391,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go new file mode 100644 index 00000000..2ef36bbc --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodelim marshals and unmarshals varint size-delimited messages. +package protodelim + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" +) + +// MarshalOptions is a configurable varint size-delimited marshaler. +type MarshalOptions struct{ proto.MarshalOptions } + +// MarshalTo writes a varint size-delimited wire-format message to w. +// If w returns an error, MarshalTo returns it unchanged. +func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) { + msgBytes, err := o.MarshalOptions.Marshal(m) + if err != nil { + return 0, err + } + + sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes))) + sizeWritten, err := w.Write(sizeBytes) + if err != nil { + return sizeWritten, err + } + msgWritten, err := w.Write(msgBytes) + if err != nil { + return sizeWritten + msgWritten, err + } + return sizeWritten + msgWritten, nil +} + +// MarshalTo writes a varint size-delimited wire-format message to w +// with the default options. +// +// See the documentation for [MarshalOptions.MarshalTo]. +func MarshalTo(w io.Writer, m proto.Message) (int, error) { + return MarshalOptions{}.MarshalTo(w, m) +} + +// UnmarshalOptions is a configurable varint size-delimited unmarshaler. +type UnmarshalOptions struct { + proto.UnmarshalOptions + + // MaxSize is the maximum size in wire-format bytes of a single message. + // Unmarshaling a message larger than MaxSize will return an error. + // A zero MaxSize will default to 4 MiB. + // Setting MaxSize to -1 disables the limit. 
+ MaxSize int64 +} + +const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size + +// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size +// that is larger than its configured [UnmarshalOptions.MaxSize]. +type SizeTooLargeError struct { + // Size is the varint size of the message encountered + // that was larger than the provided MaxSize. + Size uint64 + + // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded. + MaxSize uint64 +} + +func (e *SizeTooLargeError) Error() string { + return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize) +} + +// Reader is the interface expected by [UnmarshalFrom]. +// It is implemented by *[bufio.Reader]. +type Reader interface { + io.Reader + io.ByteReader +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// The error is [io.EOF] error only if no bytes are read. +// If an EOF happens after reading some but not all the bytes, +// UnmarshalFrom returns a non-io.EOF error. +// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged, +// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned. +func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error { + var sizeArr [binary.MaxVarintLen64]byte + sizeBuf := sizeArr[:0] + for i := range sizeArr { + b, err := r.ReadByte() + if err != nil { + // Immediate EOF is unexpected. + if err == io.EOF && i != 0 { + break + } + return err + } + sizeBuf = append(sizeBuf, b) + if b < 0x80 { + break + } + } + size, n := protowire.ConsumeVarint(sizeBuf) + if n < 0 { + return protowire.ParseError(n) + } + + maxSize := o.MaxSize + if maxSize == 0 { + maxSize = defaultMaxSize + } + if maxSize != -1 && size > uint64(maxSize) { + return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "") + } + + var b []byte + var err error + if br, ok := r.(*bufio.Reader); ok { + // Use the []byte from the bufio.Reader instead of having to allocate one. + // This reduces CPU usage and allocated bytes. + b, err = br.Peek(int(size)) + if err == nil { + defer br.Discard(int(size)) + } else { + b = nil + } + } + if b == nil { + b = make([]byte, size) + _, err = io.ReadFull(r, b) + } + + if err == io.EOF { + return io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if err := o.Unmarshal(b, m); err != nil { + return err + } + return nil +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r with the default options. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the documentation for [UnmarshalOptions.UnmarshalFrom]. 
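Since the new protodelim package is all about the framing protocol (a varint length prefix followed by the wire-format message), a short round trip makes the API concrete. The sketch below is illustrative and not part of the vendored change; durationpb.Duration is just an assumed stand-in for any proto.Message, and the 1 MiB MaxSize is an arbitrary example value.

package main

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var buf bytes.Buffer

	// Write two length-prefixed messages back to back.
	for _, secs := range []int64{1, 2} {
		if _, err := protodelim.MarshalTo(&buf, &durationpb.Duration{Seconds: secs}); err != nil {
			panic(err)
		}
	}

	// Read them back through a bufio.Reader, which satisfies the Reader
	// interface and lets the unmarshaler peek the message bytes without
	// an extra copy.
	r := bufio.NewReader(&buf)
	opts := protodelim.UnmarshalOptions{MaxSize: 1 << 20} // assumed 1 MiB cap
	for {
		msg := &durationpb.Duration{}
		err := opts.UnmarshalFrom(r, msg)
		if errors.Is(err, io.EOF) {
			break // clean end of stream: no bytes were read
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(msg.GetSeconds()) // 1, then 2
	}
}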
+func UnmarshalFrom(r Reader, m proto.Message) error { + return UnmarshalOptions{}.UnmarshalFrom(r, m) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 5f28148d..f4790237 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" @@ -23,7 +24,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -37,7 +38,7 @@ type UnmarshalOptions struct { // required fields will not return an error. AllowPartial bool - // If DiscardUnknown is set, unknown fields are ignored. + // If DiscardUnknown is set, unknown fields and enum name values are ignored. DiscardUnknown bool // Resolver is used for looking up types when unmarshaling @@ -47,9 +48,13 @@ type UnmarshalOptions struct { protoregistry.MessageTypeResolver protoregistry.ExtensionTypeResolver } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // It will clear the message first before setting the fields. // If it returns an error, the given message may be partially set. @@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { if o.Resolver == nil { o.Resolver = protoregistry.GlobalTypes } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } dec := decoder{json.NewDecoder(b), o} if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { @@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { // unmarshalMessage unmarshals a message into the given protoreflect.Message. func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { return unmarshal(d, m) } @@ -266,7 +278,9 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field if err != nil { return err } - m.Set(fd, val) + if val.IsValid() { + m.Set(fd, val) + } return nil } @@ -329,7 +343,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. 
} case protoreflect.EnumKind: - if v, ok := unmarshalEnum(tok, fd); ok { + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { return v, nil } @@ -474,7 +488,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. @@ -482,6 +496,9 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflec if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { return protoreflect.ValueOfEnum(enumVal.Number()), true } + if discardUnknown { + return protoreflect.Value{}, true + } case json.Number: if n, ok := tok.Int(32); ok { @@ -542,7 +559,9 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc if err != nil { return err } - list.Append(val) + if val.IsValid() { + list.Append(val) + } } } @@ -609,8 +628,9 @@ Loop: if err != nil { return err } - - mmap.Set(pkey, pval) + if pval.IsValid() { + mmap.Set(pkey, pval) + } } return nil diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index 21d5d2cb..ae71007c 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -6,6 +6,6 @@ // format. It follows the guide at // https://protobuf.dev/programming-guides/proto3#json. // -// This package produces a different output than the standard "encoding/json" +// This package produces a different output than the standard [encoding/json] // package, which does not operate correctly on protocol buffer messages. package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 66b95870..3f75098b 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -31,7 +31,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in JSON format using default options. +// Marshal writes the given [proto.Message] in JSON format using default options. // Do not depend on the output being stable. It may change over time across // different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -81,6 +81,25 @@ type MarshalOptions struct { // ╚═══════╧════════════════════════════════════════╝ EmitUnpopulated bool + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps.
The fields affected are as follows: + // ╔═══════╤════════════════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════════════════╣ + // ║ false │ non-optional scalar boolean fields ║ + // ║ 0 │ non-optional scalar numeric fields ║ + // ║ "" │ non-optional scalar string/byte fields ║ + // ║ [] │ empty repeated fields ║ + // ║ {} │ empty map fields ║ + // ╚═══════╧════════════════════════════════════════╝ + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + // Resolver is used for looking up types when expanding google.protobuf.Any // messages. If nil, this defaults to using protoregistry.GlobalTypes. Resolver interface { @@ -102,7 +121,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal marshals the given proto.Message in the JSON format using options in +// Marshal marshals the given [proto.Message] in the JSON format using options in // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { @@ -178,7 +197,11 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields. -type unpopulatedFieldRanger struct{ protoreflect.Message } +type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() @@ -192,6 +215,9 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { + if m.skipNull { + continue + } v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { @@ -217,8 +243,11 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { defer e.EndObject() var fields order.FieldRanger = m - if e.opts.EmitUnpopulated { - fields = unpopulatedFieldRanger{m} + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} } if typeURL != "" { fields = typeURLFieldRanger{fields, typeURL} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 6c37d417..4b177c82 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error { // Use another decoder to parse the unread bytes for @type field.
This // avoids advancing a read from current decoder because the current JSON // object may contain the fields of the embedded type. - dec := decoder{d.Clone(), UnmarshalOptions{}} + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} tok, err := findTypeURL(dec) switch err { case errEmptyObject: @@ -308,48 +308,29 @@ Loop: // array) in order to advance the read to the next JSON value. It relies on // the decoder returning an error if the types are not in valid sequence. func (d decoder) skipJSONValue() error { - tok, err := d.Read() - if err != nil { - return err - } - // Only need to continue reading for objects and arrays. - switch tok.Kind() { - case json.ObjectOpen: - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case json.ObjectClose: - return nil - case json.Name: - // Skip object field value. - if err := d.skipJSONValue(); err != nil { - return err - } - } + var open int + for { + tok, err := d.Read() + if err != nil { + return err } - - case json.ArrayOpen: - for { - tok, err := d.Peek() - if err != nil { - return err - } - switch tok.Kind() { - case json.ArrayClose: - d.Read() - return nil - default: - // Skip array item. - if err := d.skipJSONValue(); err != nil { - return err - } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. + return errors.New("unexpected EOF") + } + if open == 0 { + return nil } } - return nil } // unmarshalAnyValue unmarshals the given custom-type message from the JSON diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 4921b2d4..a45f112b 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -21,7 +21,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -51,7 +51,7 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { @@ -739,7 +739,9 @@ func (d decoder) skipValue() error { case text.ListClose: return nil case text.MessageOpen: - return d.skipMessageValue() + if err := d.skipMessageValue(); err != nil { + return err + } default: // Skip items. 
This will not validate whether skipped values are // of the same type or not, same behavior as C++ diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 722a7b41..95967e81 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -33,7 +33,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in textproto format using default +// Marshal writes the given [proto.Message] in textproto format using default // options. Do not depend on the output being stable. It may change over time // across different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -97,7 +97,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal writes the given proto.Message in textproto format using options in +// Marshal writes the given [proto.Message] in textproto format using options in // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index f4b4686c..e942bc98 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -6,7 +6,7 @@ // See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, -// use the "google.golang.org/protobuf/proto" package instead. +// use the [google.golang.org/protobuf/proto] package instead. package protowire import ( @@ -87,7 +87,7 @@ func ParseError(n int) error { // ConsumeField parses an entire field record (both tag and value) and returns // the field number, the wire type, and the total length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). // // The total length includes the tag header and the end group marker (if the // field is a group). @@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) { } // ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field Number and wire Type have already been parsed. -// This returns a negative length upon an error (see ParseError). +// This assumes that the field [Number] and wire [Type] have already been parsed. +// This returns a negative length upon an error (see [ParseError]). // // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. @@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte { } // ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeTag(b []byte) (Number, Type, int) { v, n := ConsumeVarint(b) if n < 0 { @@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte { } // ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). 
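One concrete datapoint for the ConsumeVarint contract documented here (an illustrative aside, not part of the vendored change): the two bytes 0xAC 0x02 decode to 300, because 0xAC carries the low seven bits 0x2C (44) with the continuation bit set and 0x02 contributes 2<<7 = 256.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	v, n := protowire.ConsumeVarint([]byte{0xAC, 0x02})
	if n < 0 {
		panic(protowire.ParseError(n)) // a negative length encodes the error, as documented
	}
	fmt.Println(v, n) // 300 2
}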
func ConsumeVarint(b []byte) (v uint64, n int) { var y uint64 if len(b) <= 0 { @@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte { } // ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed32(b []byte) (v uint32, n int) { if len(b) < 4 { return 0, errCodeTruncated @@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte { } // ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed64(b []byte) (v uint64, n int) { if len(b) < 8 { return 0, errCodeTruncated @@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte { } // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeBytes(b []byte) (v []byte, n int) { m, n := ConsumeVarint(b) if n < 0 { @@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte { } // ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeString(b []byte) (v string, n int) { bb, n := ConsumeBytes(b) return string(bb), n @@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte { // ConsumeGroup parses b as a group value until the trailing end group marker, // and verifies that the end marker matches the provided num. The value v // does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeGroup(num Number, b []byte) (v []byte, n int) { n = ConsumeFieldValue(num, StartGroupType, b) if n < 0 { @@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int { return n + SizeTag(num) } -// DecodeTag decodes the field Number and wire Type from its unified form. -// The Number is -1 if the decoded field number overflows int32. +// DecodeTag decodes the field [Number] and wire [Type] from its unified form. +// The [Number] is -1 if the decoded field number overflows int32. // Other than overflow, this does not check for field number validity. func DecodeTag(x uint64) (Number, Type) { // NOTE: MessageSet allows for larger field numbers than normal. @@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) { return Number(x >> 3), Type(x & 7) } -// EncodeTag encodes the field Number and wire Type into its unified form. +// EncodeTag encodes the field [Number] and wire [Type] into its unified form. 
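The tag helpers documented here pack a field number and wire type into a single varint, num<<3 | typ. A tiny worked example (illustrative, not part of the vendored change): field number 1 with the varint wire type packs to 0x08, which is the first byte of any message whose first encoded field is field 1 holding a varint value.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	tag := protowire.EncodeTag(1, protowire.VarintType) // 1<<3 | 0
	fmt.Printf("0x%02X\n", tag)                         // 0x08

	num, typ := protowire.DecodeTag(tag)
	fmt.Println(num, typ) // 1 0
}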
func EncodeTag(num Number, typ Type) uint64 { return uint64(num)<<3 | uint64(typ&7) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index db5248e1..a45625c8 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records - rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + rv := reflect.ValueOf(vs.Get(i)) + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPublic"), "IsPublic"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + }...) ss = append(ss, "{"+rs.Join()+"}") } return start + joinStrings(ss, allowMulti) + end @@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } } -// descriptorAccessors is a list of accessors to print for each descriptor. -// -// Do not print all accessors since some contain redundant information, -// while others are pointers that we do not want to follow since the descriptor -// is actually a cyclic graph. -// -// Using a list allows us to print the accessors in a sensible order. 
-var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +type methodAndName struct { + method reflect.Value + name string } func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) } -func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { + +func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + return formatDescOpt(t, isRoot, allowMulti, record) +} + +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { } _, isFile := t.(protoreflect.FileDescriptor) - rs := records{allowMulti: allowMulti} + rs := records{ + allowMulti: allowMulti, + record: record, + } if t.IsPlaceholder() { if isFile { - rs.Append(rv, "Path", "Package", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } else { - rs.Append(rv, "FullName", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("FullName"), "FullName"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } } else { switch { case isFile: - rs.Append(rv, "Syntax") + rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) case isRoot: - rs.Append(rv, "Syntax", "FullName") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Syntax"), "Syntax"}, + {rv.MethodByName("FullName"), "FullName"}, + }...) 
default: - rs.Append(rv, "Name") + rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) } switch t := t.(type) { case protoreflect.FieldDescriptor: - for _, s := range descriptorAccessors[rt] { - switch s { + accessors := []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + {rv.MethodByName("Cardinality"), "Cardinality"}, + {rv.MethodByName("Kind"), "Kind"}, + {rv.MethodByName("HasJSONName"), "HasJSONName"}, + {rv.MethodByName("JSONName"), "JSONName"}, + {rv.MethodByName("HasPresence"), "HasPresence"}, + {rv.MethodByName("IsExtension"), "IsExtension"}, + {rv.MethodByName("IsPacked"), "IsPacked"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + {rv.MethodByName("IsList"), "IsList"}, + {rv.MethodByName("IsMap"), "IsMap"}, + {rv.MethodByName("MapKey"), "MapKey"}, + {rv.MethodByName("MapValue"), "MapValue"}, + {rv.MethodByName("HasDefault"), "HasDefault"}, + {rv.MethodByName("Default"), "Default"}, + {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, + {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, + {rv.MethodByName("Message"), "Message"}, + {rv.MethodByName("Enum"), "Enum"}, + } + for _, s := range accessors { + switch s.name { case "MapKey": if k := t.MapKey(); k != nil { rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) @@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { if v := t.MapValue(); v != nil { switch v.Kind() { case protoreflect.EnumKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) case protoreflect.MessageKind, protoreflect.GroupKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) default: - rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) } } case "ContainingOneof": if od := t.ContainingOneof(); od != nil { - rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) } case "ContainingMessage": if t.IsExtension() { - rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) } case "Message": if !t.IsMap() { @@ -187,13 +219,61 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { ss = append(ss, string(fs.Get(i).Name())) } if len(ss) > 0 { - rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) } - default: - rs.Append(rv, descriptorAccessors[rt]...) + + case protoreflect.FileDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("Imports"), "Imports"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + {rv.MethodByName("Services"), "Services"}, + }...) 
+ + case protoreflect.MessageDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, + {rv.MethodByName("Fields"), "Fields"}, + {rv.MethodByName("Oneofs"), "Oneofs"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, + {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + }...) + + case protoreflect.EnumDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Values"), "Values"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + }...) + + case protoreflect.EnumValueDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + }...) + + case protoreflect.ServiceDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Methods"), "Methods"}, + }...) + + case protoreflect.MethodDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Input"), "Input"}, + {rv.MethodByName("Output"), "Output"}, + {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, + {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, + }...) } - if rv.MethodByName("GoType").IsValid() { - rs.Append(rv, "GoType") + if m := rv.MethodByName("GoType"); m.IsValid() { + rs.Append(rv, methodAndName{m, "GoType"}) } } return start + rs.Join() + end @@ -202,19 +282,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { type records struct { recs [][2]string allowMulti bool + + // record is a function that will be called for every Append() or + // AppendRecs() call, to be used for testing with the + // InternalFormatDescOptForTesting function. + record func(string) } -func (rs *records) Append(v reflect.Value, accessors ...string) { +func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { + if rs.record != nil { + rs.record(fieldName) + } + rs.recs = append(rs.recs, newRecs) +} + +func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { for _, a := range accessors { + if rs.record != nil { + rs.record(a.name) + } var rv reflect.Value - if m := v.MethodByName(a); m.IsValid() { - rv = m.Call(nil)[0] + if a.method.IsValid() { + rv = a.method.Call(nil)[0] } if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a) + rv = v.FieldByName(a.name) } if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) } if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] @@ -261,7 +356,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { default: s = fmt.Sprint(v) } - rs.recs = append(rs.recs, [2]string{a, s}) + rs.recs = append(rs.recs, [2]string{a.name, s}) } } diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go new file mode 100644 index 00000000..14656b65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package editiondefaults contains the binary representation of the editions +// defaults. +package editiondefaults + +import _ "embed" + +//go:embed editions_defaults.binpb +var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb new file mode 100644 index 00000000..18f07568 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb @@ -0,0 +1,4 @@ + +  (0æ +  (0ç +  (0è æ(è \ No newline at end of file diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d043a6eb..d2b3ac03 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) { case ObjectClose: if len(d.openStack) == 0 || - d.lastToken.kind == comma || + d.lastToken.kind&(Name|comma) != 0 || d.openStack[len(d.openStack)-1] != ObjectOpen { return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 7c3689ba..8826bcf4 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -21,11 +21,26 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) +// Edition is an Enum for proto2.Edition +type Edition int32 + +// These values align with the value of Enum in descriptor.proto which allows +// direct conversion between the proto enum and this enum. +const ( + EditionUnknown Edition = 0 + EditionProto2 Edition = 998 + EditionProto3 Edition = 999 + Edition2023 Edition = 1000 + EditionUnsupported Edition = 100000 +) + // The types in this file may have a suffix: // • L0: Contains fields common to all descriptors (except File) and // must be initialized up front. // • L1: Contains fields specific to a descriptor and -// must be initialized up front. +// must be initialized up front. If the associated proto uses Editions, the +// Editions features must always be resolved. If not explicitly set, the +// appropriate default must be resolved and set. // • L2: Contains fields that are lazily initialized when constructing // from the raw file descriptor. When constructing as a literal, the L2 // fields must be initialized up front. 
@@ -44,6 +59,7 @@ type ( } FileL1 struct { Syntax protoreflect.Syntax + Edition Edition // Only used if Syntax == Editions Path string Package protoreflect.FullName @@ -51,12 +67,41 @@ type ( Messages Messages Extensions Extensions Services Services + + EditionFeatures EditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } + + EditionFeatures struct { + // IsFieldPresence is true if field_presence is EXPLICIT + // https://protobuf.dev/editions/features/#field_presence + IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED + // https://protobuf.dev/editions/features/#field_presence + IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN + // https://protobuf.dev/editions/features/#enum_type + IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED + // https://protobuf.dev/editions/features/#repeated_field_encoding + IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY + // https://protobuf.dev/editions/features/#utf8_validation + IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED + // https://protobuf.dev/editions/features/#message_encoding + IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW + // https://protobuf.dev/editions/features/#json_format + IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the + // UnmarshalJSON([]byte) error method for enums. + GenerateLegacyUnmarshalJSON bool + } ) func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } @@ -117,6 +162,8 @@ type ( } EnumL1 struct { eagerValues bool // controls whether EnumL2.Values is already populated + + EditionFeatures EditionFeatures } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -178,6 +225,8 @@ type ( Extensions Extensions IsMapEntry bool // promoted from google.protobuf.MessageOptions IsMessageSet bool // promoted from google.protobuf.MessageOptions + + EditionFeatures EditionFeatures } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -210,6 +259,8 @@ type ( ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor + + EditionFeatures EditionFeatures } Oneof struct { @@ -219,6 +270,8 @@ type ( OneofL1 struct { Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + + EditionFeatures EditionFeatures } ) @@ -268,23 +321,36 @@ func (fd *Field) Options() protoreflect.ProtoMessage { } func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Kind() protoreflect.Kind { + return fd.L1.Kind +} +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || 
fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + if fd.L1.Cardinality == protoreflect.Repeated { + return false + } + explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence + return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { - switch fd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - default: - return true - } + if fd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsPacked + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 { + // proto3 repeated fields are packed by default. + return !fd.L1.HasPacked || fd.L1.IsPacked } return fd.L1.IsPacked } @@ -333,6 +399,9 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. func (fd *Field) EnforceUTF8() bool { + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsUTF8Validated + } if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } @@ -359,10 +428,11 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number protoreflect.FieldNumber - Extendee protoreflect.MessageDescriptor - Cardinality protoreflect.Cardinality - Kind protoreflect.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind + EditionFeatures EditionFeatures } ExtensionL2 struct { Options func() protoreflect.ProtoMessage diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 4a1584c9..237e64fd 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -5,6 +5,7 @@ package filedesc import ( + "fmt" "sync" "google.golang.org/protobuf/encoding/protowire" @@ -98,6 +99,7 @@ func (fd *File) unmarshalSeed(b []byte) { var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int + var options []byte b0 := b for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -113,6 +115,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + case "editions": + fd.L1.Syntax = protoreflect.Editions default: panic("invalid syntax") } @@ -120,6 +124,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_Options_field_number: + options = v case genid.FileDescriptorProto_EnumType_field_number: if prevField 
!= genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -154,6 +160,13 @@ func (fd *File) unmarshalSeed(b []byte) { numServices++ } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Edition_field_number: + fd.L1.Edition = Edition(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -166,6 +179,15 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 } + if fd.L1.Syntax == protoreflect.Editions { + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) + } + + // Parse editions features from options if any + if options != nil { + fd.unmarshalSeedOptions(options) + } + // Must allocate all declarations before parsing each descriptor type // to ensure we handled all descriptors in "flattened ordering". if numEnums > 0 { @@ -219,6 +241,28 @@ func (fd *File) unmarshalSeed(b []byte) { } } +func (fd *File) unmarshalSeedOptions(b []byte) { + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileOptions_Features_field_number: + if fd.Syntax() != protoreflect.Editions { + panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) + } + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd @@ -275,6 +319,7 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i + md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int @@ -380,6 +425,13 @@ func (md *Message) unmarshalSeedOptions(b []byte) { case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MessageOptions_Features_field_number: + md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 736a19a7..482a61cc 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -414,6 +414,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i + fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) var rawTypeName []byte var rawOptions []byte @@ -465,6 +466,12 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } + if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + fd.L1.Kind = protoreflect.GroupKind + } + if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired { + fd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { @@ 
-497,6 +504,13 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.HasEnforceUTF8 = true fd.L1.EnforceUTF8 = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -534,6 +548,7 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { var rawTypeName []byte var rawOptions []byte + xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee) xd.L2 = new(ExtensionL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -565,6 +580,12 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] } } + if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } + if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired { + xd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { @@ -589,6 +610,13 @@ func (xd *Extension) unmarshalOptions(b []byte) { case genid.FieldOptions_Packed_field_number: xd.L2.IsPacked = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go new file mode 100644 index 00000000..0375a49d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package filedesc + +import ( + "fmt" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var defaultsCache = make(map[Edition]EditionFeatures) + +func init() { + unmarshalEditionDefaults(editiondefaults.Defaults) +} + +func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + } + } + return parent +} + +func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSet_FieldPresence_field_number: + parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + case genid.FeatureSet_EnumType_field_number: + parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value + case genid.FeatureSet_RepeatedFieldEncoding_field_number: + parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value + case genid.FeatureSet_Utf8Validation_field_number: + parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value + case genid.FeatureSet_MessageEncoding_field_number: + parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value + case genid.FeatureSet_JsonFormat_field_number: + parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + parent = unmarshalGoFeature(v, parent) + } + } + } + + return parent +} + +func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures { + var parentFS EditionFeatures + switch p := parentDesc.(type) { + case *File: + parentFS = p.L1.EditionFeatures + case *Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + return parentFS +} + +func unmarshalEditionDefault(b []byte) { + var ed Edition + var fs EditionFeatures + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number: + ed = Edition(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number: + fs = unmarshalFeatureSet(v, fs) + } + } + } + defaultsCache[ed] = fs +} + +func unmarshalEditionDefaults(b []byte) { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.FeatureSetDefaults_Defaults_field_number: + def, m := protowire.ConsumeBytes(b) + b = b[m:] + unmarshalEditionDefault(def) + case genid.FeatureSetDefaults_MinimumEdition_field_number, + 
genid.FeatureSetDefaults_MaximumEdition_field_number: + // We don't care about the minimum and maximum editions. If the + // edition we are looking for later on is not in the cache we know + // it is outside of the range between minimum and maximum edition. + _, m := protowire.ConsumeVarint(b) + b = b[m:] + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + } + } +} + +func getFeaturesFor(ed Edition) EditionFeatures { + if def, ok := defaultsCache[ed]; ok { + return def + } + panic(fmt.Sprintf("unsupported edition: %v", ed)) +} diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 136f1b21..40272c89 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -12,6 +12,27 @@ import ( const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" +// Full and short names for google.protobuf.Edition. +const ( + Edition_enum_fullname = "google.protobuf.Edition" + Edition_enum_name = "Edition" +) + +// Enum values for google.protobuf.Edition. +const ( + Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_PROTO2_enum_value = 998 + Edition_EDITION_PROTO3_enum_value = 999 + Edition_EDITION_2023_enum_value = 1000 + Edition_EDITION_2024_enum_value = 1001 + Edition_EDITION_1_TEST_ONLY_enum_value = 1 + Edition_EDITION_2_TEST_ONLY_enum_value = 2 + Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 + Edition_EDITION_99998_TEST_ONLY_enum_value = 99998 + Edition_EDITION_99999_TEST_ONLY_enum_value = 99999 + Edition_EDITION_MAX_enum_value = 2147483647 +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -81,7 +102,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 - FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14 ) // Names for google.protobuf.DescriptorProto. 
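Note on the editions plumbing added above: the new editions.go resolves features in two steps — per-edition defaults are decoded once into a package-level cache (unmarshalEditionDefaults / getFeaturesFor), and each descriptor then starts from its parent's features (featuresFromParentDesc) and applies any explicit FeatureSet overrides parsed from its options. The standalone sketch below mirrors that shape only; every identifier in it (edition, featureSet, featuresFor, resolveField, and the example default values) is invented for illustration and is not part of the vendored package. The numeric edition values do match the Edition enum constants introduced in descriptor_gen.go.

// editions_lookup_sketch.go — illustration only, not part of the patch.
package main

import "fmt"

// edition and featureSet stand in for the internal Edition and EditionFeatures
// types; the feature fields and defaults here are simplified placeholders.
type edition int

const (
	editionProto2 edition = 998  // EDITION_PROTO2
	editionProto3 edition = 999  // EDITION_PROTO3
	edition2023   edition = 1000 // EDITION_2023
)

type featureSet struct {
	ExplicitPresence bool
	PackedRepeated   bool
}

// defaults plays the role of the package-level defaultsCache: populated once
// at init time from serialized FeatureSetDefaults, then only read.
var defaults = map[edition]featureSet{
	editionProto2: {ExplicitPresence: true, PackedRepeated: false},
	editionProto3: {ExplicitPresence: false, PackedRepeated: true},
	edition2023:   {ExplicitPresence: true, PackedRepeated: true},
}

// featuresFor looks up the cached defaults for an edition and fails loudly on
// an unknown edition, the same shape as getFeaturesFor in the diff.
func featuresFor(ed edition) featureSet {
	fs, ok := defaults[ed]
	if !ok {
		panic(fmt.Sprintf("unsupported edition: %v", ed))
	}
	return fs
}

// resolveField models the inheritance step: copy the parent's features, then
// apply any field-level overrides on top (what unmarshalFeatureSet does when
// it decodes FieldOptions.features).
func resolveField(parent featureSet, overrides ...func(*featureSet)) featureSet {
	fs := parent
	for _, apply := range overrides {
		apply(&fs)
	}
	return fs
}

func main() {
	file := featuresFor(edition2023)
	field := resolveField(file, func(fs *featureSet) { fs.PackedRepeated = false })
	fmt.Printf("resolved field features: %+v\n", field)
}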
@@ -184,10 +205,12 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) @@ -195,6 +218,7 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 ) @@ -204,6 +228,12 @@ const ( ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" ) +// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_DECLARATION_enum_value = 0 + ExtensionRangeOptions_UNVERIFIED_enum_value = 1 +) + // Names for google.protobuf.ExtensionRangeOptions.Declaration. const ( ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" @@ -212,29 +242,26 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" - ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" - ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" - ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" - ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" - ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" - ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" - ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" - ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" - ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" ) // Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 - ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 - ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 - ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 - ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 - ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -291,12 +318,41 @@ const ( FieldDescriptorProto_Type_enum_name = "Type" ) +// Enum values for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 + FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 + FieldDescriptorProto_TYPE_INT64_enum_value = 3 + FieldDescriptorProto_TYPE_UINT64_enum_value = 4 + FieldDescriptorProto_TYPE_INT32_enum_value = 5 + FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 + FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 + FieldDescriptorProto_TYPE_BOOL_enum_value = 8 + FieldDescriptorProto_TYPE_STRING_enum_value = 9 + FieldDescriptorProto_TYPE_GROUP_enum_value = 10 + FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 + FieldDescriptorProto_TYPE_BYTES_enum_value = 12 + FieldDescriptorProto_TYPE_UINT32_enum_value = 13 + FieldDescriptorProto_TYPE_ENUM_enum_value = 14 + FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 + FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 + FieldDescriptorProto_TYPE_SINT32_enum_value = 17 + FieldDescriptorProto_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.FieldDescriptorProto.Label. const ( FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" FieldDescriptorProto_Label_enum_name = "Label" ) +// Enum values for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 + FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 + FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 +) + // Names for google.protobuf.OneofDescriptorProto. 
const ( OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" @@ -468,7 +524,6 @@ const ( FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" - FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" @@ -478,6 +533,7 @@ const ( FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_Features_field_name protoreflect.Name = "features" FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" @@ -490,7 +546,6 @@ const ( FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" - FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" @@ -500,6 +555,7 @@ const ( FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features" FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" ) @@ -515,7 +571,6 @@ const ( FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 - FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 @@ -525,6 +580,7 @@ const ( FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_Features_field_number protoreflect.FieldNumber = 50 FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -534,6 
+590,13 @@ const ( FileOptions_OptimizeMode_enum_name = "OptimizeMode" ) +// Enum values for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_SPEED_enum_value = 1 + FileOptions_CODE_SIZE_enum_value = 2 + FileOptions_LITE_RUNTIME_enum_value = 3 +) + // Names for google.protobuf.MessageOptions. const ( MessageOptions_message_name protoreflect.Name = "MessageOptions" @@ -547,6 +610,7 @@ const ( MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_name protoreflect.Name = "features" MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" @@ -554,6 +618,7 @@ const ( MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) @@ -564,6 +629,7 @@ const ( MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_Features_field_number protoreflect.FieldNumber = 12 MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -584,8 +650,9 @@ const ( FieldOptions_Weak_field_name protoreflect.Name = "weak" FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" - FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_Targets_field_name protoreflect.Name = "targets" + FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" + FieldOptions_Features_field_name protoreflect.Name = "features" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -597,8 +664,9 @@ const ( FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" - FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" + FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" + FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" 
) @@ -613,8 +681,9 @@ const ( FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 - FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 + FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 + FieldOptions_Features_field_number protoreflect.FieldNumber = 21 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -624,24 +693,80 @@ const ( FieldOptions_CType_enum_name = "CType" ) +// Enum values for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_STRING_enum_value = 0 + FieldOptions_CORD_enum_value = 1 + FieldOptions_STRING_PIECE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.JSType. const ( FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" FieldOptions_JSType_enum_name = "JSType" ) +// Enum values for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JS_NORMAL_enum_value = 0 + FieldOptions_JS_STRING_enum_value = 1 + FieldOptions_JS_NUMBER_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionRetention. const ( FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" FieldOptions_OptionRetention_enum_name = "OptionRetention" ) +// Enum values for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_RETENTION_UNKNOWN_enum_value = 0 + FieldOptions_RETENTION_RUNTIME_enum_value = 1 + FieldOptions_RETENTION_SOURCE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionTargetType. const ( FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Enum values for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 + FieldOptions_TARGET_TYPE_FILE_enum_value = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 + FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 + FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 + FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 + FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 + FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 + FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 +) + +// Names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" + FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" +) + +// Field names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" + FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" + + FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" + FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" +) + +// Field numbers for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.OneofOptions. 
const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -650,13 +775,16 @@ const ( // Field names for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_name protoreflect.Name = "features" OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" ) // Field numbers for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_number protoreflect.FieldNumber = 1 OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -671,11 +799,13 @@ const ( EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_name protoreflect.Name = "features" EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) @@ -684,6 +814,7 @@ const ( EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_Features_field_number protoreflect.FieldNumber = 7 EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -696,15 +827,21 @@ const ( // Field names for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_Features_field_name protoreflect.Name = "features" + EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" + EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 + EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -716,15 +853,18 @@ const ( // Field names for google.protobuf.ServiceOptions. 
const ( + ServiceOptions_Features_field_name protoreflect.Name = "features" ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" ) // Field numbers for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -739,10 +879,12 @@ const ( const ( MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_Features_field_name protoreflect.Name = "features" MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" ) @@ -750,6 +892,7 @@ const ( const ( MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_Features_field_number protoreflect.FieldNumber = 35 MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -759,6 +902,13 @@ const ( MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" ) +// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 + MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 + MethodOptions_IDEMPOTENT_enum_value = 2 +) + // Names for google.protobuf.UninterpretedOption. const ( UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" @@ -816,6 +966,163 @@ const ( UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FeatureSet. +const ( + FeatureSet_message_name protoreflect.Name = "FeatureSet" + FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" +) + +// Field names for google.protobuf.FeatureSet. 
+const ( + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" +) + +// Field numbers for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 +) + +// Full and short names for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" + FeatureSet_FieldPresence_enum_name = "FieldPresence" +) + +// Enum values for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 + FeatureSet_EXPLICIT_enum_value = 1 + FeatureSet_IMPLICIT_enum_value = 2 + FeatureSet_LEGACY_REQUIRED_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" + FeatureSet_EnumType_enum_name = "EnumType" +) + +// Enum values for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 + FeatureSet_OPEN_enum_value = 1 + FeatureSet_CLOSED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" + FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" +) + +// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_PACKED_enum_value = 1 + FeatureSet_EXPANDED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" + FeatureSet_Utf8Validation_enum_name = "Utf8Validation" +) + +// Enum values for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 + FeatureSet_VERIFY_enum_value = 2 + FeatureSet_NONE_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.MessageEncoding. 
+const ( + FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" + FeatureSet_MessageEncoding_enum_name = "MessageEncoding" +) + +// Enum values for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_LENGTH_PREFIXED_enum_value = 1 + FeatureSet_DELIMITED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" + FeatureSet_JsonFormat_enum_name = "JsonFormat" +) + +// Enum values for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 + FeatureSet_ALLOW_enum_value = 1 + FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 +) + +// Names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" + FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" +) + +// Field names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" + FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" + FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" + + FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" + FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" + FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" +) + +// Field numbers for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 + FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" + FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +) + +// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" +) + +// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.SourceCodeInfo. 
const ( SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" @@ -917,3 +1224,10 @@ const ( GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) + +// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_NONE_enum_value = 0 + GeneratedCodeInfo_Annotation_SET_enum_value = 1 + GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go new file mode 100644 index 00000000..fd9015e8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto" + +// Names for google.protobuf.GoFeatures. +const ( + GoFeatures_message_name protoreflect.Name = "GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" +) + +// Field names for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" +) + +// Field numbers for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go index 1a38944b..ad6f80c4 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -18,6 +18,11 @@ const ( NullValue_enum_name = "NullValue" ) +// Enum values for google.protobuf.NullValue. +const ( + NullValue_NULL_VALUE_enum_value = 0 +) + // Names for google.protobuf.Struct. const ( Struct_message_name protoreflect.Name = "Struct" diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index e0f75fea..49bc73e2 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -18,6 +18,13 @@ const ( Syntax_enum_name = "Syntax" ) +// Enum values for google.protobuf.Syntax. +const ( + Syntax_SYNTAX_PROTO2_enum_value = 0 + Syntax_SYNTAX_PROTO3_enum_value = 1 + Syntax_SYNTAX_EDITIONS_enum_value = 2 +) + // Names for google.protobuf.Type. const ( Type_message_name protoreflect.Name = "Type" @@ -105,12 +112,43 @@ const ( Field_Kind_enum_name = "Kind" ) +// Enum values for google.protobuf.Field.Kind. 
+const ( + Field_TYPE_UNKNOWN_enum_value = 0 + Field_TYPE_DOUBLE_enum_value = 1 + Field_TYPE_FLOAT_enum_value = 2 + Field_TYPE_INT64_enum_value = 3 + Field_TYPE_UINT64_enum_value = 4 + Field_TYPE_INT32_enum_value = 5 + Field_TYPE_FIXED64_enum_value = 6 + Field_TYPE_FIXED32_enum_value = 7 + Field_TYPE_BOOL_enum_value = 8 + Field_TYPE_STRING_enum_value = 9 + Field_TYPE_GROUP_enum_value = 10 + Field_TYPE_MESSAGE_enum_value = 11 + Field_TYPE_BYTES_enum_value = 12 + Field_TYPE_UINT32_enum_value = 13 + Field_TYPE_ENUM_enum_value = 14 + Field_TYPE_SFIXED32_enum_value = 15 + Field_TYPE_SFIXED64_enum_value = 16 + Field_TYPE_SINT32_enum_value = 17 + Field_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.Field.Cardinality. const ( Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" Field_Cardinality_enum_name = "Cardinality" ) +// Enum values for google.protobuf.Field.Cardinality. +const ( + Field_CARDINALITY_UNKNOWN_enum_value = 0 + Field_CARDINALITY_OPTIONAL_enum_value = 1 + Field_CARDINALITY_REQUIRED_enum_value = 2 + Field_CARDINALITY_REPEATED_enum_value = 3 +) + // Names for google.protobuf.Enum. const ( Enum_message_name protoreflect.Name = "Enum" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index e74cefdc..2b8f122c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -21,26 +21,18 @@ type extensionFieldInfo struct { validation validationInfo } -var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo - func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info } - return legacyLoadExtensionFieldInfo(xt) -} - -// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { - if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { - return xi.(*extensionFieldInfo) - } - e := makeExtensionFieldInfo(xt.TypeDescriptor()) - if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { - return e.(*extensionFieldInfo) - } - return e + // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to + // recompute this metadata repeatedly. But without support for something like + // weak references, such a cache would pin temporary values (like dynamic + // extension types, constructed for the duration of a user request) to the + // heap forever, causing memory usage of the cache to grow unbounded. + // See discussion in https://github.com/golang/protobuf/issues/1521. 
+ return makeExtensionFieldInfo(xt.TypeDescriptor()) } func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index 1a509b63..f55dc01e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growBoolSlice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { 
return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growFloat32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4725,11 +4808,15 @@ func appendDoubleSlice(b 
[]byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growFloat64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index 576dcf3a..13077751 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -197,7 +197,7 @@ func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: + case !fd.HasPresence() && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. switch fd.Kind() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 61c483fa..2ab2c629 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -206,13 +206,18 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName // Obtain a list of oneof wrapper types. var oneofWrappers []reflect.Type - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := t.MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) - } + methods := make([]reflect.Method, 0, 2) + if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 4f5fb67a..629bacdc 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -192,12 +192,17 @@ fieldLoop: // Derive a mapping of oneof wrappers to fields. 
oneofWrappers := mi.OneofWrappers - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - oneofWrappers = vs - } + methods := make([]reflect.Method, 0, 2) + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 5e736c60..986322b1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -538,6 +538,6 @@ func isZero(v reflect.Value) bool { } return true default: - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 4c491bdf..517e9443 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -159,6 +159,42 @@ func (p pointer) SetPointer(v pointer) { p.v.Elem().Set(v.v) } +func growSlice(p pointer, addCap int) { + // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
+ in := p.v.Elem() + out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) + reflect.Copy(out, in) + p.v.Elem().Set(out) +} + +func (p pointer) growBoolSlice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + growSlice(p, addCap) +} + func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } func (ms *messageState) pointer() pointer { panic("not supported") } func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index ee0e0573..4b020e31 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -138,6 +138,46 @@ func (p pointer) SetPointer(v pointer) { *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) } +func (p pointer) growBoolSlice(addCap int) { + sp := p.BoolSlice() + s := make([]bool, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growInt32Slice(addCap int) { + sp := p.Int32Slice() + s := make([]int32, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + sp := p.Int64Slice() + s := make([]int64, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + p.growInt64Slice(addCap) +} + // Static check that MessageState does not exceed the size of a pointer. const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go index 0b74e765..a6e7df24 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -17,7 +17,7 @@ import ( // EnforceUTF8 reports whether to enforce strict UTF-8 validation. func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy { + if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { return fd.EnforceUTF8() } diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go similarity index 96% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index 61a84d34..a008acd0 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go new file mode 100644 index 00000000..60166f2b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 0999f29d..a50fcfb4 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. 
const ( Major = 1 - Minor = 31 + Minor = 33 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 48d47946..e5b03b56 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -69,7 +69,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // UnmarshalState parses a wire-format message and places the result in m. // // This method permits fine-grained control over the unmarshaler. -// Most users should use Unmarshal instead. +// Most users should use [Unmarshal] instead. func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { if o.RecursionLimit == 0 { o.RecursionLimit = protowire.DefaultRecursionLimit diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index ec71e717..80ed16a0 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -18,27 +18,27 @@ // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. // -// • Size reports the size of a message in the wire format. +// - [Size] reports the size of a message in the wire format. // -// • Marshal converts a message to the wire format. -// The MarshalOptions type provides more control over wire marshaling. +// - [Marshal] converts a message to the wire format. +// The [MarshalOptions] type provides more control over wire marshaling. // -// • Unmarshal converts a message from the wire format. -// The UnmarshalOptions type provides more control over wire unmarshaling. +// - [Unmarshal] converts a message from the wire format. +// The [UnmarshalOptions] type provides more control over wire unmarshaling. // // # Basic message operations // -// • Clone makes a deep copy of a message. +// - [Clone] makes a deep copy of a message. // -// • Merge merges the content of a message into another. +// - [Merge] merges the content of a message into another. // -// • Equal compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// "google.golang.org/protobuf/testing/protocmp". +// - [Equal] compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// [google.golang.org/protobuf/testing/protocmp]. // -// • Reset clears the content of a message. +// - [Reset] clears the content of a message. // -// • CheckInitialized reports whether all required fields in a message are set. +// - [CheckInitialized] reports whether all required fields in a message are set. // // # Optional scalar constructors // @@ -46,9 +46,9 @@ // as pointers to a value. For example, an optional string field has the // Go type *string. // -// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. +// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. // // Generated enum types usually have an Enum method which performs the // same operation. 
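Illustrative sketch, not part of the vendored patch: the proto package documentation rewritten in the hunk above lists the core wire-format operations ([Size], [Marshal], [Unmarshal]), the basic message operations ([Clone], [Merge], [Equal]), and the optional scalar constructors ([Bool], [Int32], ..., [String]). A minimal program exercising those calls could look like the following; the durationpb well-known type is assumed here only to have a concrete message on hand and is not something this patch touches.

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/durationpb" // assumed, for illustration only
    )

    func main() {
        // Marshal a message to the wire format and parse it back.
        src := durationpb.New(90 * time.Second)
        wire, err := proto.Marshal(src)
        if err != nil {
            panic(err)
        }
        dst := &durationpb.Duration{}
        if err := proto.Unmarshal(wire, dst); err != nil {
            panic(err)
        }
        fmt.Println(proto.Equal(src, dst), proto.Size(src)) // true, wire size in bytes

        // Optional scalar constructors return pointers, as used by presence-tracked fields.
        name := proto.String("example") // *string
        count := proto.Int32(3)         // *int32
        fmt.Println(*name, *count)
    }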
@@ -57,29 +57,29 @@ // // # Extension accessors // -// • HasExtension, GetExtension, SetExtension, and ClearExtension -// access extension field values in a protocol buffer message. +// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] +// access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // // # Related packages // -// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to -// and from JSON. +// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to +// and from JSON. // -// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to -// and from the text format. +// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to +// and from the text format. // -// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a -// reflection interface for protocol buffer data types. +// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a +// reflection interface for protocol buffer data types. // -// • Package "google.golang.org/protobuf/testing/protocmp" provides features -// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" -// package. +// - Package [google.golang.org/protobuf/testing/protocmp] provides features +// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] +// package. // -// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. +// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. // // This module contains additional packages for more specialized use cases. // Consult the individual package documentation for details. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index bf7f816d..4fed202f 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -129,7 +129,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // MarshalState returns the wire-format encoding of a message. // // This method permits fine-grained control over the marshaler. -// Most users should use Marshal instead. +// Most users should use [Marshal] instead. func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { return o.marshal(in.Buf, in.Message) } diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 5f293cda..17899a3a 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -26,7 +26,7 @@ func HasExtension(m Message, xt protoreflect.ExtensionType) bool { } // ClearExtension clears an extension field such that subsequent -// HasExtension calls return false. +// [HasExtension] calls return false. // It panics if m is invalid or if xt does not extend m. 
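Illustrative sketch, not part of the vendored patch: the related-packages list updated above points at protojson and prototext for JSON and text-format encoding of messages. A hedged example of both encoders follows; the structpb well-known type is an assumption made purely to have a convenient message to encode.

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/encoding/prototext"
        "google.golang.org/protobuf/types/known/structpb" // assumed, for illustration only
    )

    func main() {
        msg, err := structpb.NewStruct(map[string]interface{}{"enabled": true, "replicas": 3})
        if err != nil {
            panic(err)
        }
        jsonBytes, err := protojson.Marshal(msg) // JSON encoding of the message
        if err != nil {
            panic(err)
        }
        textBytes, err := prototext.Marshal(msg) // text-format encoding of the same message
        if err != nil {
            panic(err)
        }
        fmt.Println(string(jsonBytes))
        fmt.Println(string(textBytes))
    }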
func ClearExtension(m Message, xt protoreflect.ExtensionType) { m.ProtoReflect().Clear(xt.TypeDescriptor()) diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index d761ab33..3c6fe578 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -21,7 +21,7 @@ import ( // The unknown fields of src are appended to the unknown fields of dst. // // It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the UnmarshalOptions.Merge option specified. +// into dst with the [UnmarshalOptions.Merge] option specified. func Merge(dst, src Message) { // TODO: Should nil src be treated as semantically equivalent to a // untyped, read-only, empty message? What about a nil dst? diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index 1f0d183b..7543ee6b 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -15,18 +15,20 @@ import ( // protobuf module that accept a Message, except where otherwise specified. // // This is the v2 interface definition for protobuf messages. -// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. // -// To convert a v1 message to a v2 message, -// use "github.com/golang/protobuf/proto".MessageV2. -// To convert a v2 message to a v1 message, -// use "github.com/golang/protobuf/proto".MessageV1. +// - To convert a v1 message to a v2 message, +// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. +// - To convert a v2 message to a v1 message, +// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. type Message = protoreflect.ProtoMessage -// Error matches all errors produced by packages in the protobuf module. +// Error matches all errors produced by packages in the protobuf module +// according to [errors.Is]. // -// That is, errors.Is(err, Error) reports whether an error is produced -// by this module. +// Example usage: +// +// if errors.Is(err, proto.Error) { ... } var Error error func init() { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index e4dfb120..baa0cc62 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -3,11 +3,11 @@ // license that can be found in the LICENSE file. // Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. // // The google.protobuf.FileDescriptorProto is a protobuf message that describes // the type information for a .proto file in a form that is easily serializable. -// The protoreflect.FileDescriptor is a more structured representation of +// The [protoreflect.FileDescriptor] is a more structured representation of // the FileDescriptorProto message where references and remote dependencies // can be directly followed. package protodesc @@ -24,11 +24,11 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// Resolver is the resolver used by NewFile to resolve dependencies. +// Resolver is the resolver used by [NewFile] to resolve dependencies. 
// The enums and messages provided must belong to some parent file, // which is also registered. // -// It is implemented by protoregistry.Files. +// It is implemented by [protoregistry.Files]. type Resolver interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) @@ -61,19 +61,19 @@ type FileOptions struct { AllowUnresolvable bool } -// NewFile creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. See FileOptions.New for more information. +// NewFile creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. See [FileOptions.New] for more information. func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { return FileOptions{}.New(fd, r) } -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. See FileOptions.NewFiles for more information. +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { return FileOptions{}.NewFiles(fd) } -// New creates a new protoreflect.FileDescriptor from the provided +// New creates a new [protoreflect.FileDescriptor] from the provided // file descriptor message. The file must represent a valid proto file according // to protobuf semantics. The returned descriptor is a deep copy of the input. // @@ -93,9 +93,15 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot f.L1.Syntax = protoreflect.Proto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + case "editions": + f.L1.Syntax = protoreflect.Editions + f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") @@ -108,6 +114,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } + if f.L1.Syntax == protoreflect.Editions { + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) + } f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -231,7 +240,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) { } } -// NewFiles creates a new protoregistry.Files from the provided +// NewFiles creates a new [protoregistry.Files] from the provided // FileDescriptorSet message. The descriptor set must include only // valid files according to protobuf semantics. The returned descriptors // are a deep copy of the input. 
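Illustrative sketch, not part of the vendored patch: the protodesc documentation updated above describes converting between google.protobuf.FileDescriptorProto messages and [protoreflect.FileDescriptor] values via [NewFile], with a protoregistry.Files acting as the Resolver. A minimal round-trip is sketched below, under the assumption that the durationpb well-known file is available and registered in the global registry.

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protodesc"
        "google.golang.org/protobuf/reflect/protoregistry"
        "google.golang.org/protobuf/types/known/durationpb" // assumed, for illustration only
    )

    func main() {
        // Take the FileDescriptor of a registered well-known file.
        fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()

        // Convert it to its google.protobuf.FileDescriptorProto form...
        fdp := protodesc.ToFileDescriptorProto(fd)

        // ...and rebuild a FileDescriptor from that proto, resolving dependencies
        // against the global registry, which implements the Resolver interface.
        rebuilt, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
        if err != nil {
            panic(err)
        }
        fmt.Println(rebuilt.Path(), rebuilt.Syntax())
    }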
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 37efda1a..b3278163 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -28,6 +28,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt opts = proto.Clone(opts).(*descriptorpb.EnumOptions) e.L2.Options = func() protoreflect.ProtoMessage { return opts } } + e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -68,6 +69,9 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } + if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) + } if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -114,6 +118,27 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return ms, nil } +// canBePacked returns whether the field can use packed encoding: +// https://protobuf.dev/programming-guides/encoding/#packed +func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { + if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false // not a repeated field + } + + switch protoreflect.Kind(fd.GetType()) { + case protoreflect.MessageKind, protoreflect.GroupKind: + return false // not a scalar type field + + case protoreflect.StringKind, protoreflect.BytesKind: + // string and bytes can explicitly not be declared as packed, + // see https://protobuf.dev/programming-guides/encoding/#packed + return false + + default: + return true + } +} + func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers for i, fd := range fds { @@ -137,6 +162,34 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if fd.JsonName != nil { f.L1.StringName.InitJSON(fd.GetJsonName()) } + + if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) + + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } + // We reuse the existing field because the old option `[packed = + // true]` is mutually exclusive with the editions feature. + if canBePacked(fd) { + f.L1.HasPacked = true + f.L1.IsPacked = f.L1.EditionFeatures.IsPacked + } + + // We pretend this option is always explicitly set because the only + // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 + // or to return the appropriate default. + // When using editions we either parse the option or resolve the + // appropriate default here (instead of later when this option is + // requested from the descriptor). + // In proto2/proto3 syntax HasEnforceUTF8 might be false. 
+ f.L1.HasEnforceUTF8 = true + f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated + + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind + } + } } return fs, nil } @@ -151,6 +204,9 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } + if parent.Syntax() == protoreflect.Editions { + o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures()) + } } } return os, nil diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 27d7e350..254ca585 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -276,8 +276,8 @@ func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvab } else if err != nil { return v, ev, err } - if fd.Syntax() == protoreflect.Proto3 { - return v, ev, errors.New("cannot be specified under proto3 semantics") + if !fd.HasPresence() { + return v, ev, errors.New("cannot be specified with implicit field presence") } if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { return v, ev, errors.New("cannot be specified on composite types") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 9af1d564..e4dcaf87 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -107,7 +107,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } if m.Syntax() == protoreflect.Proto3 { @@ -314,8 +314,8 @@ func checkValidGroup(fd protoreflect.FieldDescriptor) error { switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() != protoreflect.Proto2: - return errors.New("invalid under proto2 semantics") + case fd.Syntax() == protoreflect.Proto3: + return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") case fd.FullName().Parent() != md.FullName().Parent(): diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go new file mode 100644 index 00000000..2a6b29d1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -0,0 +1,148 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protodesc + +import ( + "fmt" + "os" + "sync" + + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" +) + +const ( + SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 + SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 +) + +var defaults = &descriptorpb.FeatureSetDefaults{} +var defaultsCacheMu sync.Mutex +var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) + +func init() { + err := proto.Unmarshal(editiondefaults.Defaults, defaults) + if err != nil { + fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) + os.Exit(1) + } +} + +func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition { + return filedesc.Edition(epb) +} + +func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { + switch ed { + case filedesc.EditionUnknown: + return descriptorpb.Edition_EDITION_UNKNOWN + case filedesc.EditionProto2: + return descriptorpb.Edition_EDITION_PROTO2 + case filedesc.EditionProto3: + return descriptorpb.Edition_EDITION_PROTO3 + case filedesc.Edition2023: + return descriptorpb.Edition_EDITION_2023 + default: + panic(fmt.Sprintf("unknown value for edition: %v", ed)) + } +} + +func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { + defaultsCacheMu.Lock() + defer defaultsCacheMu.Unlock() + if def, ok := defaultsCache[ed]; ok { + return def + } + edpb := toEditionProto(ed) + if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { + // This should never happen protodesc.(FileOptions).New would fail when + // initializing the file descriptor. + // This most likely means the embedded defaults were not updated. + fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) + os.Exit(1) + } + fs := defaults.GetDefaults()[0].GetFeatures() + // Using a linear search for now. + // Editions are guaranteed to be sorted and thus we could use a binary search. + // Given that there are only a handful of editions (with one more per year) + // there is not much reason to use a binary search. + for _, def := range defaults.GetDefaults() { + if def.GetEdition() <= edpb { + fs = def.GetFeatures() + } else { + break + } + } + defaultsCache[ed] = fs + return fs +} + +// mergeEditionFeatures merges the parent and child feature sets. This function +// should be used when initializing Go descriptors from descriptor protos which +// is why the parent is a filedesc.EditionsFeatures (Go representation) while +// the child is a descriptorproto.FeatureSet (protoc representation). +// Any feature set by the child overwrites what is set by the parent. 
+func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { + var parentFS filedesc.EditionFeatures + switch p := parentDesc.(type) { + case *filedesc.File: + parentFS = p.L1.EditionFeatures + case *filedesc.Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + if child == nil { + return parentFS + } + if fp := child.FieldPresence; fp != nil { + parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || + *fp == descriptorpb.FeatureSet_EXPLICIT + parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED + } + if et := child.EnumType; et != nil { + parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN + } + + if rfe := child.RepeatedFieldEncoding; rfe != nil { + parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED + } + + if utf8val := child.Utf8Validation; utf8val != nil { + parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY + } + + if me := child.MessageEncoding; me != nil { + parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED + } + + if jf := child.JsonFormat; jf != nil { + parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW + } + + if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { + if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { + parentFS.GenerateLegacyUnmarshalJSON = *luje + } + } + + return parentFS +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. +func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { + dfs := getFeatureSetFor(fd.L1.Edition) + // initialize the featureset with the defaults + fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs) + // overwrite any options explicitly specified + fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a7c5ceff..9d6e0542 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -16,7 +16,7 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a // google.protobuf.FileDescriptorProto message. func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ @@ -70,13 +70,13 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } return p } -// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a // google.protobuf.DescriptorProto message. 
func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { p := &descriptorpb.DescriptorProto{ @@ -119,7 +119,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des return p } -// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a // google.protobuf.FieldDescriptorProto message. func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { p := &descriptorpb.FieldDescriptorProto{ @@ -168,7 +168,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi return p } -// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a // google.protobuf.OneofDescriptorProto message. func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { return &descriptorpb.OneofDescriptorProto{ @@ -177,7 +177,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On } } -// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a // google.protobuf.EnumDescriptorProto message. func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { p := &descriptorpb.EnumDescriptorProto{ @@ -200,7 +200,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD return p } -// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a // google.protobuf.EnumValueDescriptorProto message. func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { return &descriptorpb.EnumValueDescriptorProto{ @@ -210,7 +210,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip } } -// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a // google.protobuf.ServiceDescriptorProto message. func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { p := &descriptorpb.ServiceDescriptorProto{ @@ -223,7 +223,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto return p } -// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a // google.protobuf.MethodDescriptorProto message. func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { p := &descriptorpb.MethodDescriptorProto{ diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 55aa1492..00b01fbd 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -10,46 +10,46 @@ // // # Protocol Buffer Descriptors // -// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) // are immutable objects that represent protobuf type information. // They are wrappers around the messages declared in descriptor.proto. 
// Protobuf descriptors alone lack any information regarding Go types. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Descriptor and ProtoReflect.Descriptor accessors respectively // return the protobuf descriptor for the values. // // The protobuf descriptor interfaces are not meant to be implemented by // user code since they might need to be extended in the future to support // additions to the protobuf language. -// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// The [google.golang.org/protobuf/reflect/protodesc] package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // // # Go Type Descriptors // -// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. // There is commonly a one-to-one relationship between protobuf descriptors and // Go type descriptors, but it can potentially be a one-to-many relationship. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Type and ProtoReflect.Type accessors respectively // return the protobuf descriptor for the values. // -// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// The [google.golang.org/protobuf/types/dynamicpb] package can be used to // create Go type descriptors from protobuf descriptors. // // # Value Interfaces // -// The Enum and Message interfaces provide a reflective view over an +// The [Enum] and [Message] interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve // the enum value number for any concrete enum type. For messages, it provides // the ability to access or manipulate fields of the message. // -// To convert a proto.Message to a protoreflect.Message, use the +// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the // former's ProtoReflect method. Since the ProtoReflect method is new to the // v2 message interface, it may not be present on older message implementations. -// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// The [github.com/golang/protobuf/proto.MessageReflect] function can be used // to obtain a reflective view on older messages. // // # Relationships @@ -71,12 +71,12 @@ // │ │ // └────────────────── Type() ───────┘ // -// • An EnumType describes a concrete Go enum type. +// • An [EnumType] describes a concrete Go enum type. // It has an EnumDescriptor and can construct an Enum instance. // -// • An EnumDescriptor describes an abstract protobuf enum type. +// • An [EnumDescriptor] describes an abstract protobuf enum type. // -// • An Enum is a concrete enum instance. Generated enums implement Enum. +// • An [Enum] is a concrete enum instance. Generated enums implement Enum. // // ┌──────────────── New() ─────────────────┠// │ │ @@ -90,24 +90,26 @@ // │ │ // └─────────────────── Type() ─────────┘ // -// • A MessageType describes a concrete Go message type. -// It has a MessageDescriptor and can construct a Message instance. 
-// Just as how Go's reflect.Type is a reflective description of a Go type, -// a MessageType is a reflective description of a Go type for a protobuf message. +// • A [MessageType] describes a concrete Go message type. +// It has a [MessageDescriptor] and can construct a [Message] instance. +// Just as how Go's [reflect.Type] is a reflective description of a Go type, +// a [MessageType] is a reflective description of a Go type for a protobuf message. // -// • A MessageDescriptor describes an abstract protobuf message type. -// It has no understanding of Go types. In order to construct a MessageType -// from just a MessageDescriptor, you can consider looking up the message type -// in the global registry using protoregistry.GlobalTypes.FindMessageByName -// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// • A [MessageDescriptor] describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a [MessageType] +// from just a [MessageDescriptor], you can consider looking up the message type +// in the global registry using the FindMessageByName method on +// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] +// or constructing a dynamic [MessageType] using +// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. // -// • A Message is a reflective view over a concrete message instance. -// Generated messages implement ProtoMessage, which can convert to a Message. -// Just as how Go's reflect.Value is a reflective view over a Go value, -// a Message is a reflective view over a concrete protobuf message instance. -// Using Go reflection as an analogy, the ProtoReflect method is similar to -// calling reflect.ValueOf, and the Message.Interface method is similar to -// calling reflect.Value.Interface. +// • A [Message] is a reflective view over a concrete message instance. +// Generated messages implement [ProtoMessage], which can convert to a [Message]. +// Just as how Go's [reflect.Value] is a reflective view over a Go value, +// a [Message] is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to +// calling [reflect.ValueOf], and the [Message.Interface] method is similar to +// calling [reflect.Value.Interface]. // // ┌── TypeDescriptor() ──┠┌───── Descriptor() ─────┠// │ V │ V @@ -119,15 +121,15 @@ // │ │ // └────── implements ────────┘ // -// • An ExtensionType describes a concrete Go implementation of an extension. -// It has an ExtensionTypeDescriptor and can convert to/from -// abstract Values and Go values. +// • An [ExtensionType] describes a concrete Go implementation of an extension. +// It has an [ExtensionTypeDescriptor] and can convert to/from +// an abstract [Value] and a Go value. // -// • An ExtensionTypeDescriptor is an ExtensionDescriptor -// which also has an ExtensionType. +// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor] +// which also has an [ExtensionType]. // -// • An ExtensionDescriptor describes an abstract protobuf extension field and -// may not always be an ExtensionTypeDescriptor. +// • An [ExtensionDescriptor] describes an abstract protobuf extension field and +// may not always be an [ExtensionTypeDescriptor]. package protoreflect import ( @@ -142,7 +144,7 @@ type doNotImplement pragma.DoNotImplement // ProtoMessage is the top-level interface that all proto messages implement. 
// This is declared in the protoreflect package to avoid a cyclic dependency; -// use the proto.Message type instead, which aliases this type. +// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type. type ProtoMessage interface{ ProtoReflect() Message } // Syntax is the language version of the proto file. @@ -151,8 +153,9 @@ type Syntax syntax type syntax int8 // keep exact type opaque as the int type may change const ( - Proto2 Syntax = 2 - Proto3 Syntax = 3 + Proto2 Syntax = 2 + Proto3 Syntax = 3 + Editions Syntax = 4 ) // IsValid reports whether the syntax is valid. @@ -172,6 +175,8 @@ func (s Syntax) String() string { return "proto2" case Proto3: return "proto3" + case Editions: + return "editions" default: return fmt.Sprintf("", s) } @@ -436,7 +441,7 @@ type Names interface { // FullName is a qualified name that uniquely identifies a proto declaration. // A qualified name is the concatenation of the proto package along with the // fully-declared name (i.e., name of parent preceding the name of the child), -// with a '.' delimiter placed between each Name. +// with a '.' delimiter placed between each [Name]. // // This should not have any leading or trailing dots. type FullName string // e.g., "google.protobuf.Field.Kind" @@ -480,7 +485,7 @@ func isLetterDigit(c byte) bool { } // Name returns the short name, which is the last identifier segment. -// A single segment FullName is the Name itself. +// A single segment FullName is the [Name] itself. func (n FullName) Name() Name { if i := strings.LastIndexByte(string(n), '.'); i >= 0 { return Name(n[i+1:]) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 717b106f..7dcc2ff0 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) - case 13: + case 14: b = p.appendSingularField(b, "edition", nil) } return b @@ -160,8 +160,6 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "java_generic_services", nil) case 18: b = p.appendSingularField(b, "py_generic_services", nil) - case 42: - b = p.appendSingularField(b, "php_generic_services", nil) case 23: b = p.appendSingularField(b, "deprecated", nil) case 31: @@ -180,6 +178,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "php_metadata_namespace", nil) case 45: b = p.appendSingularField(b, "ruby_package", nil) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -240,6 +240,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "map_entry", nil) case 11: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 12: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -285,6 +287,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 6: b = p.appendSingularField(b, 
"deprecated_legacy_json_field_conflicts", nil) + case 7: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -330,6 +334,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte { return b } switch (*p)[0] { + case 34: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 33: b = p.appendSingularField(b, "deprecated", nil) case 999: @@ -361,16 +367,39 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "debug_redact", nil) case 17: b = p.appendSingularField(b, "retention", nil) - case 18: - b = p.appendSingularField(b, "target", nil) case 19: b = p.appendRepeatedField(b, "targets", nil) + case 20: + b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) + case 21: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFeatureSet(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "field_presence", nil) + case 2: + b = p.appendSingularField(b, "enum_type", nil) + case 3: + b = p.appendSingularField(b, "repeated_field_encoding", nil) + case 4: + b = p.appendSingularField(b, "utf8_validation", nil) + case 5: + b = p.appendSingularField(b, "message_encoding", nil) + case 6: + b = p.appendSingularField(b, "json_format", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { if len(*p) == 0 { return b @@ -422,6 +451,8 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) case 2: b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "verification", nil) } @@ -433,6 +464,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte { return b } switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -446,6 +479,10 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { switch (*p)[0] { case 1: b = p.appendSingularField(b, "deprecated", nil) + case 2: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "debug_redact", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -461,12 +498,27 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 34: b = p.appendSingularField(b, "idempotency_level", nil) + case 35: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 3: + b = p.appendSingularField(b, "edition", nil) + case 2: + b = p.appendSingularField(b, "value", nil) + } + return b +} + func (p *SourcePath) 
appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b @@ -491,8 +543,6 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { b = p.appendSingularField(b, "full_name", nil) case 3: b = p.appendSingularField(b, "type", nil) - case 4: - b = p.appendSingularField(b, "is_repeated", nil) case 5: b = p.appendSingularField(b, "reserved", nil) case 6: diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 3867470d..60ff62b4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -12,7 +12,7 @@ package protoreflect // exactly identical. However, it is possible for the same semantically // identical proto type to be represented by multiple type descriptors. // -// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. // If t1 == t2, then the types are definitely equal and all accessors return // the same information. However, if t1 != t2, then it is still possible that // they still represent the same proto type (e.g., t1.FullName == t2.FullName). @@ -115,7 +115,7 @@ type Descriptor interface { // corresponds with the google.protobuf.FileDescriptorProto message. // // Top-level declarations: -// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. type FileDescriptor interface { Descriptor // Descriptor.FullName is identical to Package @@ -180,8 +180,8 @@ type FileImport struct { // corresponds with the google.protobuf.DescriptorProto message. // // Nested declarations: -// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, -// and/or MessageDescriptor. +// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor], +// and/or [MessageDescriptor]. type MessageDescriptor interface { Descriptor @@ -214,7 +214,7 @@ type MessageDescriptor interface { ExtensionRanges() FieldRanges // ExtensionRangeOptions returns the ith extension range options. // - // To avoid a dependency cycle, this method returns a proto.Message value, + // To avoid a dependency cycle, this method returns a proto.Message] value, // which always contains a google.protobuf.ExtensionRangeOptions message. // This method returns a typed nil-pointer if no options are present. // The caller must import the descriptorpb package to use this. @@ -231,9 +231,9 @@ type MessageDescriptor interface { } type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } -// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. // It is recommended that implementations of this interface also implement the -// MessageFieldTypes interface. +// [MessageFieldTypes] interface. type MessageType interface { // New returns a newly allocated empty message. // It may return nil for synthetic messages representing a map entry. @@ -249,19 +249,19 @@ type MessageType interface { Descriptor() MessageDescriptor } -// MessageFieldTypes extends a MessageType by providing type information +// MessageFieldTypes extends a [MessageType] by providing type information // regarding enums and messages referenced by the message fields. 
type MessageFieldTypes interface { MessageType - // Enum returns the EnumType for the ith field in Descriptor.Fields. + // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not an enum kind. // It panics if out of bounds. // // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() Enum(i int) EnumType - // Message returns the MessageType for the ith field in Descriptor.Fields. + // Message returns the MessageType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not a message or group kind. // It panics if out of bounds. // @@ -286,8 +286,8 @@ type MessageDescriptors interface { // corresponds with the google.protobuf.FieldDescriptorProto message. // // It is used for both normal fields defined within the parent message -// (e.g., MessageDescriptor.Fields) and fields that extend some remote message -// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message +// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). type FieldDescriptor interface { Descriptor @@ -344,7 +344,7 @@ type FieldDescriptor interface { // IsMap reports whether this field represents a map, // where the value type for the associated field is a Map. // It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. IsMap() bool // MapKey returns the field descriptor for the key in the map entry. @@ -419,7 +419,7 @@ type OneofDescriptor interface { // IsSynthetic reports whether this is a synthetic oneof created to support // proto3 optional semantics. If true, Fields contains exactly one field - // with HasOptionalKeyword specified. + // with FieldDescriptor.HasOptionalKeyword specified. IsSynthetic() bool // Fields is a list of fields belonging to this oneof. @@ -442,10 +442,10 @@ type OneofDescriptors interface { doNotImplement } -// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. type ExtensionDescriptor = FieldDescriptor -// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. type ExtensionTypeDescriptor interface { ExtensionDescriptor @@ -470,12 +470,12 @@ type ExtensionDescriptors interface { doNotImplement } -// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete // Go implementation. The nested field descriptor must be for a extension field. // // While a normal field is a member of the parent message that it is declared -// within (see Descriptor.Parent), an extension field is a member of some other -// target message (see ExtensionDescriptor.Extendee) and may have no +// within (see [Descriptor.Parent]), an extension field is a member of some other +// target message (see [FieldDescriptor.ContainingMessage]) and may have no // relationship with the parent. However, the full name of an extension field is // relative to the parent that it is declared within. // @@ -532,7 +532,7 @@ type ExtensionType interface { // corresponds with the google.protobuf.EnumDescriptorProto message. // // Nested declarations: -// EnumValueDescriptor. 
+// [EnumValueDescriptor]. type EnumDescriptor interface { Descriptor @@ -548,7 +548,7 @@ type EnumDescriptor interface { } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } -// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. type EnumType interface { // New returns an instance of this enum type with its value set to n. New(n EnumNumber) Enum @@ -610,7 +610,7 @@ type EnumValueDescriptors interface { // ServiceDescriptor describes a service and // corresponds with the google.protobuf.ServiceDescriptorProto message. // -// Nested declarations: MethodDescriptor. +// Nested declarations: [MethodDescriptor]. type ServiceDescriptor interface { Descriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index 37601b78..a7b0d06f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -27,16 +27,16 @@ type Enum interface { // Message is a reflective interface for a concrete message value, // encapsulating both type and value information for the message. // -// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// Accessor/mutators for individual fields are keyed by [FieldDescriptor]. // For non-extension fields, the descriptor must exactly match the // field known by the parent message. -// For extension fields, the descriptor must implement ExtensionTypeDescriptor, -// extend the parent message (i.e., have the same message FullName), and +// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], +// extend the parent message (i.e., have the same message [FullName]), and // be within the parent's extension range. // -// Each field Value can be a scalar or a composite type (Message, List, or Map). -// See Value for the Go types associated with a FieldDescriptor. -// Providing a Value that is invalid or of an incorrect type panics. +// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). +// See [Value] for the Go types associated with a [FieldDescriptor]. +// Providing a [Value] that is invalid or of an incorrect type panics. type Message interface { // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // "google.golang.org/protobuf/runtime/protoiface".Methods. + // google.golang.org/protobuf/runtime/protoiface.Methods. // Consult the protoiface package documentation for details. ProtoMethods() *methods } @@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool { } // List is a zero-indexed, ordered list. -// The element Value type is determined by FieldDescriptor.Kind. -// Providing a Value that is invalid or of an incorrect type panics. +// The element [Value] type is determined by [FieldDescriptor.Kind]. +// Providing a [Value] that is invalid or of an incorrect type panics. type List interface { // Len reports the number of entries in the List. // Get, Set, and Truncate panic with out of bound indexes. @@ -226,9 +226,9 @@ type List interface { } // Map is an unordered, associative map. -// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. -// The entry Value type is determined by FieldDescriptor.MapValue.Kind. 
-// Providing a MapKey or Value that is invalid or of an incorrect type panics. +// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. +// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. +// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. type Map interface { // Len reports the number of elements in the map. Len() int diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go index 59165254..654599d4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -24,19 +24,19 @@ import ( // Unlike the == operator, a NaN is equal to another NaN. // // - Enums are equal if they contain the same number. -// Since Value does not contain an enum descriptor, +// Since [Value] does not contain an enum descriptor, // enum values do not consider the type of the enum. // // - Other scalar values are equal if they contain the same value. // -// - Message values are equal if they belong to the same message descriptor, +// - [Message] values are equal if they belong to the same message descriptor, // have the same set of populated known and extension field values, // and the same set of unknown fields values. // -// - Lists are equal if they are the same length and +// - [List] values are equal if they are the same length and // each corresponding element is equal. // -// - Maps are equal if they have the same set of keys and +// - [Map] values are equal if they have the same set of keys and // the corresponding value for each key is equal. func (v1 Value) Equal(v2 Value) bool { return equalValue(v1, v2) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 08e5ef73..16030973 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -11,7 +11,7 @@ import ( // Value is a union where only one Go type may be set at a time. // The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto Kind: +// The following shows which Go type is used to represent each proto [Kind]: // // ╔════════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -31,22 +31,22 @@ import ( // // Multiple protobuf Kinds may be represented by a single Go type if the type // can losslessly represent the information for the proto kind. For example, -// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, // but use different integer encoding methods. // -// The List or Map types are used if the field cardinality is repeated. -// A field is a List if FieldDescriptor.IsList reports true. -// A field is a Map if FieldDescriptor.IsMap reports true. +// The [List] or [Map] types are used if the field cardinality is repeated. +// A field is a [List] if [FieldDescriptor.IsList] reports true. +// A field is a [Map] if [FieldDescriptor.IsMap] reports true. // // Converting to/from a Value and a concrete Go value panics on type mismatch.
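As a quick illustration of the Value union documented in the hunk above, here is a minimal, self-contained sketch; it assumes only the vendored google.golang.org/protobuf/reflect/protoreflect package and the ValueOf*, Int, String, and Equal methods that appear in this diff:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Scalars: the concrete Go type must match the proto kind.
	a := protoreflect.ValueOfInt64(42)       // int64 covers Int64Kind, Sint64Kind, Sfixed64Kind
	b := protoreflect.ValueOfInt64(42)
	s := protoreflect.ValueOfString("hello") // string covers StringKind

	fmt.Println(a.Equal(b)) // true: same scalar kind, same number
	fmt.Println(a.Int())    // 42
	fmt.Println(s.String()) // hello

	// Calling the wrong accessor, e.g. s.Int(), panics on the type mismatch.
}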
-// For example, ValueOf("hello").Int() panics because this attempts to +// For example, [ValueOf]("hello").Int() panics because this attempts to // retrieve an int64 from a string. // -// List, Map, and Message Values are called "composite" values. +// [List], [Map], and [Message] Values are called "composite" values. // // A composite Value may alias (reference) memory at some location, // such that changes to the Value updates the that location. -// A composite value acquired with a Mutable method, such as Message.Mutable, +// A composite value acquired with a Mutable method, such as [Message.Mutable], // always references the source object. // // For example: @@ -65,7 +65,7 @@ import ( // // appending to the List here may or may not modify the message. // list.Append(protoreflect.ValueOfInt32(0)) // -// Some operations, such as Message.Get, may return an "empty, read-only" +// Some operations, such as [Message.Get], may return an "empty, read-only" // composite Value. Modifying an empty, read-only value panics. type Value value @@ -306,7 +306,7 @@ func (v Value) Float() float64 { } } -// String returns v as a string. Since this method implements fmt.Stringer, +// String returns v as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (v Value) String() string { switch v.typ { @@ -327,7 +327,7 @@ func (v Value) Bytes() []byte { } } -// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +// Enum returns v as a [EnumNumber] and panics if the type is not a [EnumNumber]. func (v Value) Enum() EnumNumber { switch v.typ { case enumType: @@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber { } } -// Message returns v as a Message and panics if the type is not a Message. +// Message returns v as a [Message] and panics if the type is not a [Message]. func (v Value) Message() Message { switch vi := v.getIface().(type) { case Message: @@ -347,7 +347,7 @@ func (v Value) Message() Message { } } -// List returns v as a List and panics if the type is not a List. +// List returns v as a [List] and panics if the type is not a [List]. func (v Value) List() List { switch vi := v.getIface().(type) { case List: @@ -357,7 +357,7 @@ func (v Value) List() List { } } -// Map returns v as a Map and panics if the type is not a Map. +// Map returns v as a [Map] and panics if the type is not a [Map]. func (v Value) Map() Map { switch vi := v.getIface().(type) { case Map: @@ -367,7 +367,7 @@ func (v Value) Map() Map { } } -// MapKey returns v as a MapKey and panics for invalid MapKey types. +// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. func (v Value) MapKey() MapKey { switch v.typ { case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: @@ -378,8 +378,8 @@ func (v Value) MapKey() MapKey { } // MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key Kind (see MessageDescriptor.IsMapEntry). -// The following shows what Go type is used to represent each proto Kind: +// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]). 
+// The following shows what Go type is used to represent each proto [Kind]: // // ╔═════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -392,13 +392,13 @@ func (v Value) MapKey() MapKey { // ║ string │ StringKind ║ // ╚═════════╧═════════════════════════════════════╝ // -// A MapKey is constructed and accessed through a Value: +// A MapKey is constructed and accessed through a [Value]: // // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // -// The MapKey is a strict subset of valid types used in Value; -// converting a Value to a MapKey with an invalid type panics. +// The MapKey is a strict subset of valid types used in [Value]; +// converting a [Value] to a MapKey with an invalid type panics. type MapKey value // IsValid reports whether k is populated with a value. @@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 { return Value(k).Uint() } -// String returns k as a string. Since this method implements fmt.Stringer, +// String returns k as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (k MapKey) String() string { return Value(k).String() } -// Value returns k as a Value. +// Value returns k as a [Value]. func (k MapKey) Value() Value { return Value(k) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go similarity index 97% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 702ddf22..b1fdbe3e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go new file mode 100644 index 00000000..43547011 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + ifaceHeader struct { + _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
+ Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. + num uint64 // 8B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() string { + return unsafe.String((*byte)(v.ptr), v.num) +} +func (v Value) getBytes() []byte { + return unsafe.Slice((*byte)(v.ptr), v.num) +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index aeb55977..6267dc52 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -5,12 +5,12 @@ // Package protoregistry provides data structures to register and lookup // protobuf descriptor types. // -// The Files registry contains file descriptors and provides the ability +// The [Files] registry contains file descriptors and provides the ability // to iterate over the files or lookup a specific descriptor within the files. -// Files only contains protobuf descriptors and has no understanding of Go +// [Files] only contains protobuf descriptors and has no understanding of Go // type information that may be associated with each descriptor. // -// The Types registry contains descriptor types for which there is a known +// The [Types] registry contains descriptor types for which there is a known // Go type associated with that descriptor. It provides the ability to iterate // over the registered types or lookup a type by name. 
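To make the Files/Types distinction in the protoregistry package doc just above concrete, a minimal sketch of the lookup flow follows; it assumes the global registries and that the well-known anypb package is linked in so that google.protobuf.Any is registered:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"

	_ "google.golang.org/protobuf/types/known/anypb" // registers google.protobuf.Any in the global registries
)

func main() {
	// Files: pure descriptor lookup, no Go type information.
	d, err := protoregistry.GlobalFiles.FindDescriptorByName("google.protobuf.Any")
	if err != nil {
		// protoregistry.NotFound is returned when nothing is registered under the name.
		fmt.Println(err)
		return
	}
	fmt.Println(d.FullName()) // google.protobuf.Any

	// Types: lookup that also carries the generated Go type.
	if mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Any"); err == nil {
		fmt.Printf("%T\n", mt.New().Interface()) // *anypb.Any
	}
}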
package protoregistry @@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) { // FindDescriptorByName looks up a descriptor by the full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { if r == nil { return nil, NotFound @@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. // This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { @@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type MessageTypeResolver interface { // FindMessageByName looks up a message by its full name. // E.g., "google.protobuf.Any" @@ -451,7 +451,7 @@ type MessageTypeResolver interface { // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type ExtensionTypeResolver interface { // FindExtensionByName looks up a extension field by the field's full name. // Note that this is the full name of the field as determined by @@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac // FindEnumByName looks up an enum by its full name. // E.g., "google.protobuf.Field.Kind". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { if r == nil { return nil, NotFound @@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp // FindMessageByName looks up a message by its full name, // e.g. "google.protobuf.Any". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { if r == nil { return nil, NotFound @@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. @@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. 
func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E // FindExtensionByNumber looks up a extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 04c00f73..78624cf6 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,103 @@ import ( sync "sync" ) +// The full set of known editions. +type Edition int32 + +const ( + // A placeholder for an unknown edition value. + Edition_EDITION_UNKNOWN Edition = 0 + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + Edition_EDITION_PROTO2 Edition = 998 + Edition_EDITION_PROTO3 Edition = 999 + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. + Edition_EDITION_2023 Edition = 1000 + Edition_EDITION_2024 Edition = 1001 + // Placeholder editions for testing feature resolution. These should not be + // used or relyed on outside of tests. + Edition_EDITION_1_TEST_ONLY Edition = 1 + Edition_EDITION_2_TEST_ONLY Edition = 2 + Edition_EDITION_99997_TEST_ONLY Edition = 99997 + Edition_EDITION_99998_TEST_ONLY Edition = 99998 + Edition_EDITION_99999_TEST_ONLY Edition = 99999 + // Placeholder for specifying unbounded edition support. This should only + // ever be used by plugins that can expect to never require any changes to + // support a new edition. + Edition_EDITION_MAX Edition = 2147483647 +) + +// Enum value maps for Edition. 
+var ( + Edition_name = map[int32]string{ + 0: "EDITION_UNKNOWN", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1001: "EDITION_2024", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + 2147483647: "EDITION_MAX", + } + Edition_value = map[string]int32{ + "EDITION_UNKNOWN": 0, + "EDITION_PROTO2": 998, + "EDITION_PROTO3": 999, + "EDITION_2023": 1000, + "EDITION_2024": 1001, + "EDITION_1_TEST_ONLY": 1, + "EDITION_2_TEST_ONLY": 2, + "EDITION_99997_TEST_ONLY": 99997, + "EDITION_99998_TEST_ONLY": 99998, + "EDITION_99999_TEST_ONLY": 99999, + "EDITION_MAX": 2147483647, + } +) + +func (x Edition) Enum() *Edition { + p := new(Edition) + *p = x + return p +} + +func (x Edition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Edition) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (Edition) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x Edition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *Edition) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = Edition(num) + return nil +} + +// Deprecated: Use Edition.Descriptor instead. +func (Edition) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + // The verification state of the extension range. type ExtensionRangeOptions_VerificationState int32 @@ -80,11 +177,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -125,9 +222,10 @@ const ( FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 + // Group type is deprecated and not supported after google.protobuf. However, Proto3 // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. + // treat group fields as unknown fields. In Editions, the group wire format + // can be enabled via the `message_encoding` feature. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. // New in version 2. 
@@ -195,11 +293,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -226,21 +324,24 @@ type FieldDescriptorProto_Label int32 const ( // 0 is reserved for errors FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 + // The required label is only allowed in google.protobuf. In proto3 and Editions + // it's explicitly prohibited. In Editions, the `field_presence` feature + // can be used to get this behavior. + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 ) // Enum value maps for FieldDescriptorProto_Label. var ( FieldDescriptorProto_Label_name = map[int32]string{ 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", 3: "LABEL_REPEATED", + 2: "LABEL_REQUIRED", } FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, "LABEL_REPEATED": 3, + "LABEL_REQUIRED": 2, } ) @@ -255,11 +356,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -316,11 +417,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -382,11 +483,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -444,11 +545,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return 
&file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -506,11 +607,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -590,11 +691,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -652,11 +753,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -678,6 +779,363 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +type FeatureSet_FieldPresence int32 + +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 + FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 + FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 + FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 +) + +// Enum value maps for FeatureSet_FieldPresence. +var ( + FeatureSet_FieldPresence_name = map[int32]string{ + 0: "FIELD_PRESENCE_UNKNOWN", + 1: "EXPLICIT", + 2: "IMPLICIT", + 3: "LEGACY_REQUIRED", + } + FeatureSet_FieldPresence_value = map[string]int32{ + "FIELD_PRESENCE_UNKNOWN": 0, + "EXPLICIT": 1, + "IMPLICIT": 2, + "LEGACY_REQUIRED": 3, + } +) + +func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { + p := new(FeatureSet_FieldPresence) + *p = x + return p +} + +func (x FeatureSet_FieldPresence) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() +} + +func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[10] +} + +func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_FieldPresence(num) + return nil +} + +// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. +func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +type FeatureSet_EnumType int32 + +const ( + FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 + FeatureSet_OPEN FeatureSet_EnumType = 1 + FeatureSet_CLOSED FeatureSet_EnumType = 2 +) + +// Enum value maps for FeatureSet_EnumType. +var ( + FeatureSet_EnumType_name = map[int32]string{ + 0: "ENUM_TYPE_UNKNOWN", + 1: "OPEN", + 2: "CLOSED", + } + FeatureSet_EnumType_value = map[string]int32{ + "ENUM_TYPE_UNKNOWN": 0, + "OPEN": 1, + "CLOSED": 2, + } +) + +func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { + p := new(FeatureSet_EnumType) + *p = x + return p +} + +func (x FeatureSet_EnumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() +} + +func (FeatureSet_EnumType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[11] +} + +func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnumType(num) + return nil +} + +// Deprecated: Use FeatureSet_EnumType.Descriptor instead. +func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} +} + +type FeatureSet_RepeatedFieldEncoding int32 + +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 + FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 + FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 +) + +// Enum value maps for FeatureSet_RepeatedFieldEncoding. +var ( + FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ + 0: "REPEATED_FIELD_ENCODING_UNKNOWN", + 1: "PACKED", + 2: "EXPANDED", + } + FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ + "REPEATED_FIELD_ENCODING_UNKNOWN": 0, + "PACKED": 1, + "EXPANDED": 2, + } +) + +func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { + p := new(FeatureSet_RepeatedFieldEncoding) + *p = x + return p +} + +func (x FeatureSet_RepeatedFieldEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() +} + +func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[12] +} + +func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_RepeatedFieldEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. +func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} +} + +type FeatureSet_Utf8Validation int32 + +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 + FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 + FeatureSet_NONE FeatureSet_Utf8Validation = 3 +) + +// Enum value maps for FeatureSet_Utf8Validation. +var ( + FeatureSet_Utf8Validation_name = map[int32]string{ + 0: "UTF8_VALIDATION_UNKNOWN", + 2: "VERIFY", + 3: "NONE", + } + FeatureSet_Utf8Validation_value = map[string]int32{ + "UTF8_VALIDATION_UNKNOWN": 0, + "VERIFY": 2, + "NONE": 3, + } +) + +func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { + p := new(FeatureSet_Utf8Validation) + *p = x + return p +} + +func (x FeatureSet_Utf8Validation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() +} + +func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[13] +} + +func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_Utf8Validation(num) + return nil +} + +// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. +func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} +} + +type FeatureSet_MessageEncoding int32 + +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 + FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 + FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 +) + +// Enum value maps for FeatureSet_MessageEncoding. +var ( + FeatureSet_MessageEncoding_name = map[int32]string{ + 0: "MESSAGE_ENCODING_UNKNOWN", + 1: "LENGTH_PREFIXED", + 2: "DELIMITED", + } + FeatureSet_MessageEncoding_value = map[string]int32{ + "MESSAGE_ENCODING_UNKNOWN": 0, + "LENGTH_PREFIXED": 1, + "DELIMITED": 2, + } +) + +func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { + p := new(FeatureSet_MessageEncoding) + *p = x + return p +} + +func (x FeatureSet_MessageEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() +} + +func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[14] +} + +func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_MessageEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. +func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} +} + +type FeatureSet_JsonFormat int32 + +const ( + FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 + FeatureSet_ALLOW FeatureSet_JsonFormat = 1 + FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 +) + +// Enum value maps for FeatureSet_JsonFormat. +var ( + FeatureSet_JsonFormat_name = map[int32]string{ + 0: "JSON_FORMAT_UNKNOWN", + 1: "ALLOW", + 2: "LEGACY_BEST_EFFORT", + } + FeatureSet_JsonFormat_value = map[string]int32{ + "JSON_FORMAT_UNKNOWN": 0, + "ALLOW": 1, + "LEGACY_BEST_EFFORT": 2, + } +) + +func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { + p := new(FeatureSet_JsonFormat) + *p = x + return p +} + +func (x FeatureSet_JsonFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() +} + +func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[15] +} + +func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_JsonFormat(num) + return nil +} + +// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. +func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -716,11 +1174,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -739,7 +1197,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { // Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -822,8 +1280,8 @@ type FileDescriptorProto struct { // // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - // The edition of the proto file, which is an opaque string. 
- Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` + // The edition of the proto file. + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -942,11 +1400,11 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } -func (x *FileDescriptorProto) GetEdition() string { +func (x *FileDescriptorProto) GetEdition() Edition { if x != nil && x.Edition != nil { return *x.Edition } - return "" + return Edition_EDITION_UNKNOWN } // Describes a message type. @@ -1079,13 +1537,14 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - // go/protobuf-stripping-extension-declarations - // Like Metadata, but we use a repeated field to hold all extension - // declarations. This should avoid the size increases of transforming a large - // extension range into small ranges in generated binaries. + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The verification state of the range. - // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` } @@ -1141,6 +1600,13 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar return nil } +func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { if x != nil && x.Verification != nil { return *x.Verification @@ -1186,12 +1652,12 @@ type FieldDescriptorProto struct { // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // - // When proto3_optional is true, this field must be belong to a oneof to - // signal to old proto3 clients that presence is tracked for this field. This - // oneof is known as a "synthetic" oneof, and this field must be its sole - // member (each proto3 optional field gets its own synthetic oneof). Synthetic - // oneofs exist in the descriptor only, and do not generate any API. Synthetic - // oneofs must be ordered after all "real" oneofs. + // When proto3_optional is true, this field must belong to a oneof to signal + // to old proto3 clients that presence is tracked for this field. This oneof + // is known as a "synthetic" oneof, and this field must be its sole member + // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + // exist in the descriptor only, and do not generate any API. Synthetic oneofs + // must be ordered after all "real" oneofs. 
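One user-visible consequence of the descriptor.pb.go hunks above is that FileDescriptorProto.Edition is now the typed Edition enum (field 14) rather than an opaque string. A small, hedged sketch of the new accessor, using only the generated API shown in this patch (the example.proto name is made up for illustration):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fd := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"), // hypothetical file name
		Syntax:  proto.String("editions"),
		Edition: descriptorpb.Edition_EDITION_2023.Enum(),
	}

	// GetEdition now returns a typed enum instead of a string.
	fmt.Println(fd.GetEdition())          // EDITION_2023
	fmt.Println(fd.GetEdition().Number()) // 1000
}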
// // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still @@ -1738,7 +2204,6 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -1772,6 +2237,8 @@ type FileOptions struct { // is empty. When this option is not set, the package name will be used for // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -1785,7 +2252,6 @@ const ( Default_FileOptions_CcGenericServices = bool(false) Default_FileOptions_JavaGenericServices = bool(false) Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_PhpGenericServices = bool(false) Default_FileOptions_Deprecated = bool(false) Default_FileOptions_CcEnableArenas = bool(true) ) @@ -1893,13 +2359,6 @@ func (x *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } -func (x *FileOptions) GetPhpGenericServices() bool { - if x != nil && x.PhpGenericServices != nil { - return *x.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - func (x *FileOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1963,6 +2422,13 @@ func (x *FileOptions) GetRubyPackage() string { return "" } +func (x *FileOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2006,10 +2472,6 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - // // Whether the message is an automatically generated map entry type for the // maps field. // @@ -2030,6 +2492,10 @@ type MessageOptions struct { // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. 
Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // Enable the legacy handling of JSON field name conflicts. This lowercases // and strips underscored from the fields before comparison in proto3 only. @@ -2039,11 +2505,13 @@ type MessageOptions struct { // This should only be used as a temporary measure against broken builds due // to the change in behavior for JSON field name conflicts. // - // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // TODO This is legacy behavior we plan to remove once downstream // teams have had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2123,6 +2591,13 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *MessageOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2147,7 +2622,9 @@ type FieldOptions struct { // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. + // false will avoid using packed encoding. This option is prohibited in + // Editions, but the `repeated_field_encoding` feature can be used to control + // the behavior. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types @@ -2178,19 +2655,11 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - // - // As of May 2022, lazy verifies the contents of the byte stream during - // parsing. An invalid byte stream will cause the overall parsing to fail. 
+ // Note that lazy message fields are still eagerly verified to check + // ill-formed wireformat or missing required fields. Calling IsInitialized() + // on the outer message would fail if the inner message has missing required + // fields. Failed verification would result in parsing failure (except when + // uninitialized messages are acceptable). Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` // unverified_lazy does no correctness checks on the byte stream. This should // only be used where lazy with verification is prohibitive for performance @@ -2205,11 +2674,12 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` - Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2320,17 +2790,23 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { - if x != nil && x.Target != nil { - return *x.Target +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets } - return FieldOptions_TARGET_TYPE_UNKNOWN + return nil } -func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { +func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { if x != nil { - return x.Targets + return x.EditionDefaults + } + return nil +} + +func (x *FieldOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features } return nil } @@ -2348,6 +2824,8 @@ type OneofOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. 
+ Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2384,6 +2862,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } +func (x *OneofOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2409,11 +2894,13 @@ type EnumOptions struct { // and strips underscored from the fields before comparison in proto3 only. // The new behavior takes `json_name` into account and applies to proto2 as // well. - // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // TODO Remove this legacy behavior once downstream teams have // had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2477,6 +2964,13 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *EnumOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2495,13 +2989,20 @@ type EnumValueOptions struct { // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for EnumValueOptions fields. 
const ( - Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_DebugRedact = bool(false) ) func (x *EnumValueOptions) Reset() { @@ -2543,6 +3044,20 @@ func (x *EnumValueOptions) GetDeprecated() bool { return Default_EnumValueOptions_Deprecated } +func (x *EnumValueOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumValueOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_EnumValueOptions_DebugRedact +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2556,6 +3071,8 @@ type ServiceOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, @@ -2602,6 +3119,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } +func (x *ServiceOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2628,6 +3152,8 @@ type MethodOptions struct { // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2684,6 +3210,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { return Default_MethodOptions_IdempotencyLevel } +func (x *MethodOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2770,28 +3303,193 @@ func (x *UninterpretedOption) GetNegativeIntValue() int64 { if x != nil && x.NegativeIntValue != nil { return *x.NegativeIntValue } - return 0 + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. 
The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. +type FeatureSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` +} + +func (x *FeatureSet) Reset() { + *x = FeatureSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet) ProtoMessage() {} + +func (x *FeatureSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. 
+func (*FeatureSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { + if x != nil && x.FieldPresence != nil { + return *x.FieldPresence + } + return FeatureSet_FIELD_PRESENCE_UNKNOWN +} + +func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { + if x != nil && x.EnumType != nil { + return *x.EnumType + } + return FeatureSet_ENUM_TYPE_UNKNOWN +} + +func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { + if x != nil && x.RepeatedFieldEncoding != nil { + return *x.RepeatedFieldEncoding + } + return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { + if x != nil && x.Utf8Validation != nil { + return *x.Utf8Validation + } + return FeatureSet_UTF8_VALIDATION_UNKNOWN +} + +func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { + if x != nil && x.MessageEncoding != nil { + return *x.MessageEncoding + } + return FeatureSet_MESSAGE_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { + if x != nil && x.JsonFormat != nil { + return *x.JsonFormat + } + return FeatureSet_JSON_FORMAT_UNKNOWN +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +type FeatureSetDefaults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` +} + +func (x *FeatureSetDefaults) Reset() { + *x = FeatureSetDefaults{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults) ProtoMessage() {} + +func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (x *UninterpretedOption) GetDoubleValue() float64 { - if x != nil && x.DoubleValue != nil { - return *x.DoubleValue - } - return 0 +// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} } -func (x *UninterpretedOption) GetStringValue() []byte { +func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { if x != nil { - return x.StringValue + return x.Defaults } return nil } -func (x *UninterpretedOption) GetAggregateValue() string { - if x != nil && x.AggregateValue != nil { - return *x.AggregateValue +func (x *FeatureSetDefaults) GetMinimumEdition() Edition { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition } - return "" + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults) GetMaximumEdition() Edition { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return Edition_EDITION_UNKNOWN } // Encapsulates information about the original source file from which a @@ -2855,7 +3553,7 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2868,7 +3566,7 @@ func (x *SourceCodeInfo) String() string { func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2881,7 +3579,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} } func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { @@ -2907,7 +3605,7 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2920,7 +3618,7 @@ func (x *GeneratedCodeInfo) String() string { func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2933,7 +3631,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} } func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { @@ -2956,7 +3654,7 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2969,7 +3667,7 @@ func (x *DescriptorProto_ExtensionRange) String() string { func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3021,7 +3719,7 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3034,7 +3732,7 @@ func (x *DescriptorProto_ReservedRange) String() string { func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3078,10 +3776,6 @@ type ExtensionRangeOptions_Declaration struct { // Metadata.type, Declaration.type must have a leading dot for messages // and enums. Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` - // Deprecated. Please use "repeated". - // - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` // If true, indicates that the number is reserved in the extension range, // and any extension field with the number will fail to compile. Set this // when a declared extension field is deleted. 
@@ -3094,7 +3788,7 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3107,7 +3801,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string { func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3144,14 +3838,6 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string { return "" } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { - if x != nil && x.IsRepeated != nil { - return *x.IsRepeated - } - return false -} - func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { if x != nil && x.Reserved != nil { return *x.Reserved @@ -3184,7 +3870,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3197,7 +3883,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3227,6 +3913,61 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { return 0 } +type FieldOptions_EditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. +} + +func (x *FieldOptions_EditionDefault) Reset() { + *x = FieldOptions_EditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_EditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_EditionDefault) ProtoMessage() {} + +func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. 
+func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *FieldOptions_EditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_EditionDefault) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value + } + return "" +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -3244,7 +3985,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3257,7 +3998,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3287,6 +4028,65 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +// A map from every known edition with a unique set of defaults to its +// defaults. Not all editions may be contained here. For a given edition, +// the defaults at the closest matching edition ordered at or before it should +// be used. This field must be in strict ascending order by edition. +type FeatureSetDefaults_FeatureSetEditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { + *x = FeatureSetDefaults_FeatureSetEditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + type SourceCodeInfo_Location struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3296,7 +4096,7 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition occurs. + // the root FileDescriptorProto to the place where the definition appears. // For example, this path: // // [ 4, 3, 2, 7, 1 ] @@ -3388,7 +4188,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3401,7 +4201,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3414,7 +4214,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} } func (x *SourceCodeInfo_Location) GetPath() []int32 { @@ -3475,7 +4275,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3488,7 +4288,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3501,7 +4301,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} } func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -3550,7 +4350,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3588,250 +4388,250 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 
0x6d, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, + 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, - 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, - 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6e, 
0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, + 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 
0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 
0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, + 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, + 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, + 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 
0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, - 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, - 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, - 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, - 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, - 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 
0x6a, 0x73, 0x6f, 0x6e, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, - 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, - 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, - 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, - 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, - 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, - 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, - 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, - 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, - 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, - 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, - 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, - 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, - 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, + 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, - 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, - 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, - 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, - 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, - 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, - 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, - 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 
0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, - 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, + 
0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, + 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, + 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, + 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, + 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, @@ -3856,259 +4656,419 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, - 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, - 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, - 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, - 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x12, 0x41, 0x0a, - 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, - 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, - 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, + 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, + 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, + 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, + 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, + 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, + 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, + 
0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, + 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, - 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, - 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, - 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 
0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, - 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, - 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, - 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, - 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, - 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, - 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, - 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 
0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, + 0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, + 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, + 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 
0x52, 0x4d, 0x41, + 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, + 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, + 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, + 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, + 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, + 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, + 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, + 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 
0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, - 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, - 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 
0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, - 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, - 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, - 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, - 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, + 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, + 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, + 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, + 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, + 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, + 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, + 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 
0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, + 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, + 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, + 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 
0x72, - 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 
0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, + 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, + 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 
0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, + 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, + 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, + 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, + 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, + 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a, + 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, + 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, + 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, + 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 
0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, + 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, + 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, + 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, + 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, + 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, + 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, + 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, + 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0a, 0x12, 
0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, + 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, + 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, + 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, + 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, + 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07, + 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, + 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, + 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 
0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, + 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, + 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, + 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, + 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, + 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, + 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, + 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, + 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, + 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -4123,103 +5083,136 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return 
file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 20: google.protobuf.FileOptions - (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + 
(ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat + (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 27: google.protobuf.FileOptions + (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault + (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart + 
(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 3, // 29: 
google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 49, // [49:49] is the sub-list for method output_type - 49, // [49:49] is the sub-list for method input_type - 49, // [49:49] is the sub-list for extension type_name - 49, // [49:49] is the sub-list for extension extendee - 0, // [0:49] is the sub-list for field type_name + 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition + 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 10: 
google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 43: 
google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet + 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 71, // [71:71] is the sub-list for method output_type + 71, // [71:71] is the sub-list for method input_type + 71, // [71:71] is the sub-list for extension type_name + 71, // [71:71] is the sub-list for extension extendee + 0, // [0:71] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4475,19 +5468,21 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i 
int) interface{} { - switch v := v.(*SourceCodeInfo); i { + switch v := v.(*FeatureSet); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields + case 3: + return &v.extensionFields default: return nil } } file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GeneratedCodeInfo); i { + switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state case 1: @@ -4499,7 +5494,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ExtensionRange); i { + switch v := v.(*SourceCodeInfo); i { case 0: return &v.state case 1: @@ -4511,7 +5506,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ReservedRange); i { + switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state case 1: @@ -4523,7 +5518,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRangeOptions_Declaration); i { + switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state case 1: @@ -4535,7 +5530,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state case 1: @@ -4547,7 +5542,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -4559,7 +5554,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state case 1: @@ -4571,6 +5566,54 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions_EditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4588,8 +5631,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 10, - NumMessages: 28, + NumEnums: 17, + NumMessages: 32, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go new file mode 100644 index 00000000..25de5ae0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -0,0 +1,177 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: reflect/protodesc/proto/go_features.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +type GoFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` +} + +func (x *GoFeatures) Reset() { + *x = GoFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoFeatures) ProtoMessage() {} + +func (x *GoFeatures) ProtoReflect() protoreflect.Message { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. +func (*GoFeatures) Descriptor() ([]byte, []int) { + return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0} +} + +func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { + if x != nil && x.LegacyUnmarshalJsonEnum != nil { + return *x.LegacyUnmarshalJsonEnum + } + return false +} + +var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FeatureSet)(nil), + ExtensionType: (*GoFeatures)(nil), + Field: 1002, + Name: "google.protobuf.go", + Tag: "bytes,1002,opt,name=go", + Filename: "reflect/protodesc/proto/go_features.proto", + }, +} + +// Extension fields to descriptorpb.FeatureSet. 
+var ( + // optional google.protobuf.GoFeatures go = 1002; + E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0] +) + +var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor + +var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64, + 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, + 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, + 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, + 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, + 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f, + 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, +} + +var ( + file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once + file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc +) + +func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte { + file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() { + file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData) + }) + return file_reflect_protodesc_proto_go_features_proto_rawDescData +} + +var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{ + (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +} +var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ + 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet + 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension 
type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_reflect_protodesc_proto_go_features_proto_init() } +func file_reflect_protodesc_proto_go_features_proto_init() { + if File_reflect_protodesc_proto_go_features_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes, + DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs, + MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes, + ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes, + }.Build() + File_reflect_protodesc_proto_go_features_proto = out.File + file_reflect_protodesc_proto_go_features_proto_rawDesc = nil + file_reflect_protodesc_proto_go_features_proto_goTypes = nil + file_reflect_protodesc_proto_go_features_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto new file mode 100644 index 00000000..d2465712 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto @@ -0,0 +1,28 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +syntax = "proto2"; + +package google.protobuf; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/protobuf/types/gofeaturespb"; + +extend google.protobuf.FeatureSet { + optional GoFeatures go = 1002; +} + +message GoFeatures { + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + optional bool legacy_unmarshal_json_enum = 1 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_ENUM, + edition_defaults = { edition: EDITION_PROTO2, value: "true" }, + edition_defaults = { edition: EDITION_PROTO3, value: "false" } + ]; +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 580b232f..9de51be5 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -237,7 +237,8 @@ type Any struct { // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
diff --git a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go index f81594c9..1395a7e1 100644 --- a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AdmissionRequest = map[string]string{ diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go index 13067ad8..82598ed5 100644 --- a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AdmissionRequest = map[string]string{ diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go index 6ac9e80f..9a2d0bcc 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -44,10 +44,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{0} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{0} + return fileDescriptor_aaac5994f79683e8, []int{1} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +103,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{1} + return fileDescriptor_aaac5994f79683e8, []int{2} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +131,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{2} + return fileDescriptor_aaac5994f79683e8, []int{3} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +159,7 @@ var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{3} + return fileDescriptor_aaac5994f79683e8, []int{4} } func (m *Rule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +187,7 @@ var xxx_messageInfo_Rule proto.InternalMessageInfo func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } func (*RuleWithOperations) ProtoMessage() {} func (*RuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{4} + return fileDescriptor_aaac5994f79683e8, []int{5} } func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +215,7 @@ var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{5} + return fileDescriptor_aaac5994f79683e8, []int{6} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +243,7 @@ var 
xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{6} + return fileDescriptor_aaac5994f79683e8, []int{7} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +271,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{7} + return fileDescriptor_aaac5994f79683e8, []int{8} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +299,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{8} + return fileDescriptor_aaac5994f79683e8, []int{9} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +327,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{9} + return fileDescriptor_aaac5994f79683e8, []int{10} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -325,6 +353,7 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1.MatchCondition") proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") @@ -342,79 +371,116 @@ func init() { } var fileDescriptor_aaac5994f79683e8 = []byte{ - // 1105 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x76, 0x63, 0x8f, 0xf3, 0xa7, 0x19, 0xa0, 0x35, 0xa1, 0xf2, 0x5a, 0xae, 0x84, - 0x8c, 0x80, 0xdd, 0x26, 0x94, 0x52, 0x71, 0x41, 0xd9, 0xf0, 0x47, 0x11, 0x49, 0x1b, 0x4d, 0xda, - 0x14, 0xa1, 0x1c, 0x3a, 0x5e, 0x8f, 0xed, 0x21, 0xf6, 0xce, 0x6a, 0x66, 0xd6, 0x90, 0x1b, 0x1f, - 0x81, 0xaf, 0x00, 0x9f, 0x82, 0x1b, 0xe2, 0x96, 0x63, 0x8f, 0x39, 0xa0, 0x85, 0x2c, 0x17, 0x0e, - 0x7c, 0x82, 0x9c, 0xd0, 0xcc, 0xae, 0x77, 0xfd, 0x27, 0x09, 0x56, 0x0e, 0x3d, 0xe5, 0xe6, 0xf9, - 0xbd, 0x79, 0xbf, 0x37, 0xef, 0xed, 0x7b, 0xef, 0x27, 0x83, 0x9d, 0xa3, 0xc7, 0xc2, 0xa2, 0xcc, - 0x3e, 0x0a, 0x9a, 0x84, 0x7b, 0x44, 0x12, 0x61, 0x0f, 0x88, 0xd7, 0x62, 0xdc, 0x4e, 0x0c, 0xd8, - 0xa7, 0x36, 0x6e, 0xf5, 0xa9, 0x10, 0x94, 0x79, 0x9c, 0x74, 0xa8, 0x90, 0x1c, 0x4b, 0xca, 0x3c, - 0x7b, 0xb0, 0x6e, 
0x77, 0x88, 0x47, 0x38, 0x96, 0xa4, 0x65, 0xf9, 0x9c, 0x49, 0x06, 0xef, 0xc7, - 0x4e, 0x16, 0xf6, 0xa9, 0x75, 0xa1, 0x93, 0x35, 0x58, 0x5f, 0xfb, 0xb0, 0x43, 0x65, 0x37, 0x68, - 0x5a, 0x2e, 0xeb, 0xdb, 0x1d, 0xd6, 0x61, 0xb6, 0xf6, 0x6d, 0x06, 0x6d, 0x7d, 0xd2, 0x07, 0xfd, - 0x2b, 0xe6, 0x5c, 0x7b, 0x98, 0x3d, 0xa4, 0x8f, 0xdd, 0x2e, 0xf5, 0x08, 0x3f, 0xb6, 0xfd, 0xa3, - 0x8e, 0x02, 0x84, 0xdd, 0x27, 0x12, 0x5f, 0xf0, 0x92, 0x35, 0xfb, 0x32, 0x2f, 0x1e, 0x78, 0x92, - 0xf6, 0xc9, 0x94, 0xc3, 0xa3, 0xff, 0x73, 0x10, 0x6e, 0x97, 0xf4, 0xf1, 0xa4, 0x5f, 0xfd, 0xb7, - 0x05, 0xb0, 0xb2, 0x1b, 0x48, 0x2c, 0xa9, 0xd7, 0x79, 0x41, 0x9a, 0x5d, 0xc6, 0x8e, 0x60, 0x0d, - 0xe4, 0x3d, 0xdc, 0x27, 0x15, 0xa3, 0x66, 0x34, 0x4a, 0xce, 0xe2, 0x49, 0x68, 0xce, 0x45, 0xa1, - 0x99, 0x7f, 0x82, 0xfb, 0x04, 0x69, 0x0b, 0xe4, 0x60, 0xd1, 0xed, 0x51, 0xe2, 0xc9, 0x2d, 0xe6, - 0xb5, 0x69, 0xa7, 0x32, 0x5f, 0x33, 0x1a, 0xe5, 0x8d, 0xc7, 0xd6, 0x0c, 0xf5, 0xb3, 0x92, 0x28, - 0x5b, 0x23, 0xfe, 0xce, 0x9b, 0x49, 0x8c, 0xc5, 0x51, 0x14, 0x8d, 0xc5, 0x80, 0x87, 0xa0, 0xc0, - 0x83, 0x1e, 0x11, 0x95, 0x5c, 0x2d, 0xd7, 0x28, 0x6f, 0x7c, 0x32, 0x53, 0x30, 0x14, 0xf4, 0xc8, - 0x0b, 0x2a, 0xbb, 0x4f, 0x7d, 0x12, 0x83, 0xc2, 0x59, 0x4a, 0x62, 0x15, 0x94, 0x4d, 0xa0, 0x98, - 0x14, 0xee, 0x80, 0xa5, 0x36, 0xa6, 0xbd, 0x80, 0x93, 0x3d, 0xd6, 0xa3, 0xee, 0x71, 0x25, 0xaf, - 0x93, 0x7f, 0x37, 0x0a, 0xcd, 0xa5, 0x2f, 0x47, 0x0d, 0xe7, 0xa1, 0xb9, 0x3a, 0x06, 0x3c, 0x3b, - 0xf6, 0x09, 0x1a, 0x77, 0x86, 0x9f, 0x83, 0x72, 0x1f, 0x4b, 0xb7, 0x9b, 0x70, 0x95, 0x34, 0x57, - 0x3d, 0x0a, 0xcd, 0xf2, 0x6e, 0x06, 0x9f, 0x87, 0xe6, 0xca, 0xc8, 0x51, 0xf3, 0x8c, 0xba, 0xc1, - 0x1f, 0xc0, 0xaa, 0xaa, 0xb6, 0xf0, 0xb1, 0x4b, 0xf6, 0x49, 0x8f, 0xb8, 0x92, 0xf1, 0x4a, 0x41, - 0x97, 0xfa, 0xa3, 0x91, 0xec, 0xd3, 0xef, 0x6d, 0xf9, 0x47, 0x1d, 0x05, 0x08, 0x4b, 0xb5, 0x95, - 0x4a, 0x7f, 0x07, 0x37, 0x49, 0x6f, 0xe8, 0xea, 0xbc, 0x15, 0x85, 0xe6, 0xea, 0x93, 0x49, 0x46, - 0x34, 0x1d, 0x04, 0x32, 0xb0, 0xcc, 0x9a, 0xdf, 0x11, 0x57, 0xa6, 0x61, 0xcb, 0xd7, 0x0f, 0x0b, - 0xa3, 0xd0, 0x5c, 0x7e, 0x3a, 0x46, 0x87, 0x26, 0xe8, 0x55, 0xc1, 0x04, 0x6d, 0x91, 0x2f, 0xda, - 0x6d, 0xe2, 0x4a, 0x51, 0xb9, 0x95, 0x15, 0x6c, 0x3f, 0x83, 0x55, 0xc1, 0xb2, 0xe3, 0x56, 0x0f, - 0x0b, 0x81, 0x46, 0xdd, 0xe0, 0xa7, 0x60, 0x59, 0xf5, 0x3a, 0x0b, 0xe4, 0x3e, 0x71, 0x99, 0xd7, - 0x12, 0x95, 0x85, 0x9a, 0xd1, 0x28, 0xc4, 0x2f, 0x78, 0x36, 0x66, 0x41, 0x13, 0x37, 0xe1, 0x73, - 0x70, 0x37, 0xed, 0x22, 0x44, 0x06, 0x94, 0x7c, 0x7f, 0x40, 0xb8, 0x3a, 0x88, 0x4a, 0xb1, 0x96, - 0x6b, 0x94, 0x9c, 0x77, 0xa2, 0xd0, 0xbc, 0xbb, 0x79, 0xf1, 0x15, 0x74, 0x99, 0x2f, 0x7c, 0x09, - 0x20, 0x27, 0xd4, 0x1b, 0x30, 0x57, 0xb7, 0x5f, 0xd2, 0x10, 0x40, 0xe7, 0xf7, 0x20, 0x0a, 0x4d, - 0x88, 0xa6, 0xac, 0xe7, 0xa1, 0x79, 0x67, 0x1a, 0xd5, 0xed, 0x71, 0x01, 0x57, 0xfd, 0xd4, 0x00, - 0xf7, 0x26, 0x26, 0x38, 0x9e, 0x98, 0x20, 0xee, 0x78, 0xf8, 0x12, 0x14, 0xd5, 0x87, 0x69, 0x61, - 0x89, 0xf5, 0x48, 0x97, 0x37, 0x1e, 0xcc, 0xf6, 0x19, 0xe3, 0x6f, 0xb6, 0x4b, 0x24, 0x76, 0x60, - 0x32, 0x34, 0x20, 0xc3, 0x50, 0xca, 0x0a, 0x0f, 0x40, 0x31, 0x89, 0x2c, 0x2a, 0xf3, 0x7a, 0x3a, - 0x1f, 0xce, 0x34, 0x9d, 0x13, 0xcf, 0x76, 0xf2, 0x2a, 0x0a, 0x4a, 0xb9, 0xea, 0xff, 0x18, 0xa0, - 0x76, 0x55, 0x6a, 0x3b, 0x54, 0x48, 0x78, 0x38, 0x95, 0x9e, 0x35, 0x63, 0x97, 0x52, 0x11, 0x27, - 0x77, 0x3b, 0x49, 0xae, 0x38, 0x44, 0x46, 0x52, 0x6b, 0x83, 0x02, 0x95, 0xa4, 0x3f, 0xcc, 0x6b, - 0xf3, 0x3a, 0x79, 0x8d, 0xbd, 0x39, 0xdb, 0x3f, 0xdb, 0x8a, 0x17, 0xc5, 0xf4, 0xf5, 0xdf, 0x0d, - 0x90, 0x57, 0x0b, 0x09, 0xbe, 0x0f, 0x4a, 
0xd8, 0xa7, 0x5f, 0x71, 0x16, 0xf8, 0xa2, 0x62, 0xe8, - 0xce, 0x5b, 0x8a, 0x42, 0xb3, 0xb4, 0xb9, 0xb7, 0x1d, 0x83, 0x28, 0xb3, 0xc3, 0x75, 0x50, 0xc6, - 0x3e, 0x4d, 0x1b, 0x75, 0x5e, 0x5f, 0x5f, 0x51, 0x63, 0xb3, 0xb9, 0xb7, 0x9d, 0x36, 0xe7, 0xe8, - 0x1d, 0xc5, 0xcf, 0x89, 0x60, 0x01, 0x77, 0x93, 0x55, 0x9a, 0xf0, 0xa3, 0x21, 0x88, 0x32, 0x3b, - 0xfc, 0x00, 0x14, 0x84, 0xcb, 0x7c, 0x92, 0x6c, 0xc3, 0x3b, 0xea, 0xd9, 0xfb, 0x0a, 0x38, 0x0f, - 0xcd, 0x92, 0xfe, 0xa1, 0xdb, 0x32, 0xbe, 0x54, 0xff, 0xc5, 0x00, 0x70, 0x7a, 0xe1, 0xc2, 0xcf, - 0x00, 0x60, 0xe9, 0x29, 0x49, 0xc9, 0xd4, 0xbd, 0x94, 0xa2, 0xe7, 0xa1, 0xb9, 0x94, 0x9e, 0x34, - 0xe5, 0x88, 0x0b, 0xfc, 0x1a, 0xe4, 0xd5, 0x92, 0x4e, 0x54, 0xe6, 0xbd, 0x99, 0x17, 0x7f, 0x26, - 0x5d, 0xea, 0x84, 0x34, 0x49, 0xfd, 0x67, 0x03, 0xdc, 0xde, 0x27, 0x7c, 0x40, 0x5d, 0x82, 0x48, - 0x9b, 0x70, 0xe2, 0xb9, 0x04, 0xda, 0xa0, 0x94, 0x2e, 0xc1, 0x44, 0xf6, 0x56, 0x13, 0xdf, 0x52, - 0xba, 0x30, 0x51, 0x76, 0x27, 0x95, 0xc8, 0xf9, 0x4b, 0x25, 0xf2, 0x1e, 0xc8, 0xfb, 0x58, 0x76, - 0x2b, 0x39, 0x7d, 0xa3, 0xa8, 0xac, 0x7b, 0x58, 0x76, 0x91, 0x46, 0xb5, 0x95, 0x71, 0xa9, 0xeb, - 0x5a, 0x48, 0xac, 0x8c, 0x4b, 0xa4, 0xd1, 0xfa, 0x9f, 0xb7, 0xc0, 0xea, 0x01, 0xee, 0xd1, 0xd6, - 0x8d, 0x2c, 0xdf, 0xc8, 0xf2, 0x95, 0xb2, 0x0c, 0x6e, 0x64, 0xf9, 0x3a, 0xb2, 0x5c, 0xff, 0xc3, - 0x00, 0xd5, 0xa9, 0x09, 0x7b, 0xdd, 0xb2, 0xf9, 0xcd, 0x94, 0x6c, 0x3e, 0x9a, 0x69, 0x7a, 0xa6, - 0x1e, 0x3e, 0x25, 0x9c, 0xff, 0x1a, 0xa0, 0x7e, 0x75, 0x7a, 0xaf, 0x41, 0x3a, 0xbb, 0xe3, 0xd2, - 0xb9, 0x75, 0xbd, 0xdc, 0x66, 0x11, 0xcf, 0x5f, 0x0d, 0xf0, 0xc6, 0x05, 0xfb, 0x0b, 0xbe, 0x0d, - 0x72, 0x01, 0xef, 0x25, 0x2b, 0x78, 0x21, 0x0a, 0xcd, 0xdc, 0x73, 0xb4, 0x83, 0x14, 0x06, 0x0f, - 0xc1, 0x82, 0x88, 0x55, 0x20, 0xc9, 0xfc, 0xe3, 0x99, 0x9e, 0x37, 0xa9, 0x1c, 0x4e, 0x39, 0x0a, - 0xcd, 0x85, 0x21, 0x3a, 0xa4, 0x84, 0x0d, 0x50, 0x74, 0xb1, 0x13, 0x78, 0xad, 0x44, 0xb5, 0x16, - 0x9d, 0x45, 0x55, 0xa4, 0xad, 0xcd, 0x18, 0x43, 0xa9, 0xd5, 0xd9, 0x3e, 0x39, 0xab, 0xce, 0xbd, - 0x3a, 0xab, 0xce, 0x9d, 0x9e, 0x55, 0xe7, 0x7e, 0x8c, 0xaa, 0xc6, 0x49, 0x54, 0x35, 0x5e, 0x45, - 0x55, 0xe3, 0x34, 0xaa, 0x1a, 0x7f, 0x45, 0x55, 0xe3, 0xa7, 0xbf, 0xab, 0x73, 0xdf, 0xde, 0x9f, - 0xe1, 0xdf, 0xec, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x43, 0x44, 0x86, 0xf5, 0x0c, 0x0f, 0x00, + // 1169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xce, 0xc6, 0x36, 0xb1, 0xc7, 0x4e, 0xd2, 0x0c, 0xd0, 0x2e, 0xa5, 0xf2, 0x5a, 0xae, 0x84, + 0x82, 0x00, 0x6f, 0x9b, 0x96, 0x52, 0x71, 0x41, 0xb1, 0x29, 0x28, 0x22, 0x69, 0xa3, 0x49, 0x3f, + 0x10, 0xea, 0xa1, 0xe3, 0xf5, 0xd8, 0x1e, 0x62, 0xef, 0xac, 0x66, 0x66, 0x4d, 0x7b, 0xe3, 0x27, + 0xf0, 0x17, 0xe0, 0x4f, 0xc0, 0x95, 0x5b, 0x8f, 0xbd, 0x91, 0x03, 0x5a, 0x91, 0xe5, 0xc2, 0x81, + 0x5f, 0x90, 0x13, 0x9a, 0xd9, 0xf5, 0xae, 0xbf, 0x12, 0x56, 0x39, 0xe4, 0x94, 0x5b, 0xe6, 0x79, + 0xdf, 0xf7, 0x79, 0xe7, 0x19, 0xbf, 0x1f, 0xab, 0x80, 0xdd, 0xc3, 0xfb, 0xa2, 0x41, 0x99, 0x7d, + 0xe8, 0xb7, 0x09, 0x77, 0x89, 0x24, 0xc2, 0x1e, 0x11, 0xb7, 0xc3, 0xb8, 0x1d, 0x1b, 0xb0, 0x47, + 0x6d, 0xdc, 0x19, 0x52, 0x21, 0x28, 0x73, 0x39, 0xe9, 0x51, 0x21, 0x39, 0x96, 0x94, 0xb9, 0xf6, + 0xe8, 0xb6, 0xdd, 0x23, 0x2e, 0xe1, 0x58, 0x92, 0x4e, 0xc3, 0xe3, 0x4c, 0x32, 0x78, 0x33, 0x0a, + 0x6a, 0x60, 0x8f, 0x36, 0x16, 0x06, 0x35, 0x46, 0xb7, 0xaf, 0x7f, 0xd2, 0xa3, 0xb2, 0xef, 0xb7, + 0x1b, 0x0e, 0x1b, 0xda, 0x3d, 0xd6, 0x63, 0xb6, 0x8e, 0x6d, 0xfb, 0x5d, 0x7d, 0xd2, 0x07, 0xfd, + 0x57, 0xc4, 0x79, 
0xfd, 0x6e, 0x7a, 0x91, 0x21, 0x76, 0xfa, 0xd4, 0x25, 0xfc, 0x95, 0xed, 0x1d, + 0xf6, 0x14, 0x20, 0xec, 0x21, 0x91, 0x78, 0xc1, 0x4d, 0xae, 0xdb, 0xa7, 0x45, 0x71, 0xdf, 0x95, + 0x74, 0x48, 0xe6, 0x02, 0xee, 0xfd, 0x5f, 0x80, 0x70, 0xfa, 0x64, 0x88, 0x67, 0xe3, 0xea, 0x5d, + 0xb0, 0xb6, 0x87, 0xa5, 0xd3, 0x6f, 0x31, 0xb7, 0x43, 0x95, 0x44, 0x58, 0x03, 0x79, 0x17, 0x0f, + 0x89, 0x69, 0xd4, 0x8c, 0xcd, 0x52, 0xb3, 0xf2, 0x3a, 0xb0, 0x96, 0xc2, 0xc0, 0xca, 0x3f, 0xc4, + 0x43, 0x82, 0xb4, 0x05, 0x6e, 0x01, 0x40, 0x5e, 0x7a, 0x9c, 0xe8, 0xe7, 0x31, 0x97, 0xb5, 0x1f, + 0x8c, 0xfd, 0xc0, 0x83, 0xc4, 0x82, 0x26, 0xbc, 0xea, 0xbf, 0x16, 0xc1, 0xfa, 0x9e, 0x2f, 0xb1, + 0xa4, 0x6e, 0xef, 0x19, 0x69, 0xf7, 0x19, 0x3b, 0xcc, 0x90, 0x89, 0x83, 0x8a, 0x33, 0xa0, 0xc4, + 0x95, 0x2d, 0xe6, 0x76, 0x69, 0x4f, 0xe7, 0x2a, 0x6f, 0xdd, 0x6f, 0x64, 0xf8, 0x9d, 0x1a, 0x71, + 0x96, 0xd6, 0x44, 0x7c, 0xf3, 0x9d, 0x38, 0x47, 0x65, 0x12, 0x45, 0x53, 0x39, 0xe0, 0x73, 0x50, + 0xe0, 0xfe, 0x80, 0x08, 0x33, 0x57, 0xcb, 0x6d, 0x96, 0xb7, 0x3e, 0xcb, 0x94, 0x0c, 0xf9, 0x03, + 0xf2, 0x8c, 0xca, 0xfe, 0x23, 0x8f, 0x44, 0xa0, 0x68, 0xae, 0xc6, 0xb9, 0x0a, 0xca, 0x26, 0x50, + 0x44, 0x0a, 0x77, 0xc1, 0x6a, 0x17, 0xd3, 0x81, 0xcf, 0xc9, 0x3e, 0x1b, 0x50, 0xe7, 0x95, 0x99, + 0xd7, 0xe2, 0x3f, 0x08, 0x03, 0x6b, 0xf5, 0xab, 0x49, 0xc3, 0x49, 0x60, 0x6d, 0x4c, 0x01, 0x8f, + 0x5f, 0x79, 0x04, 0x4d, 0x07, 0xc3, 0x2f, 0x41, 0x79, 0xa8, 0x7e, 0xbd, 0x98, 0xab, 0xa4, 0xb9, + 0xea, 0x61, 0x60, 0x95, 0xf7, 0x52, 0xf8, 0x24, 0xb0, 0xd6, 0x27, 0x8e, 0x9a, 0x67, 0x32, 0x0c, + 0xbe, 0x04, 0x1b, 0xea, 0xb5, 0x85, 0x87, 0x1d, 0x72, 0x40, 0x06, 0xc4, 0x91, 0x8c, 0x9b, 0x05, + 0xfd, 0xd4, 0x77, 0x26, 0xd4, 0x27, 0x75, 0xd5, 0xf0, 0x0e, 0x7b, 0x0a, 0x10, 0x0d, 0x55, 0xbe, + 0x4a, 0xfe, 0x2e, 0x6e, 0x93, 0xc1, 0x38, 0xb4, 0xf9, 0x6e, 0x18, 0x58, 0x1b, 0x0f, 0x67, 0x19, + 0xd1, 0x7c, 0x12, 0xc8, 0xc0, 0x1a, 0x6b, 0x7f, 0x4f, 0x1c, 0x99, 0xa4, 0x2d, 0x9f, 0x3f, 0x2d, + 0x0c, 0x03, 0x6b, 0xed, 0xd1, 0x14, 0x1d, 0x9a, 0xa1, 0x57, 0x0f, 0x26, 0x68, 0x87, 0x3c, 0xe8, + 0x76, 0x89, 0x23, 0x85, 0xf9, 0x56, 0xfa, 0x60, 0x07, 0x29, 0xac, 0x1e, 0x2c, 0x3d, 0xb6, 0x06, + 0x58, 0x08, 0x34, 0x19, 0x06, 0x3f, 0x07, 0x6b, 0xaa, 0xa7, 0x98, 0x2f, 0x0f, 0x88, 0xc3, 0xdc, + 0x8e, 0x30, 0x57, 0x6a, 0xc6, 0x66, 0x21, 0xba, 0xc1, 0xe3, 0x29, 0x0b, 0x9a, 0xf1, 0x84, 0x4f, + 0xc0, 0xb5, 0xa4, 0x8a, 0x10, 0x19, 0x51, 0xf2, 0xc3, 0x53, 0xc2, 0xd5, 0x41, 0x98, 0xc5, 0x5a, + 0x6e, 0xb3, 0xd4, 0x7c, 0x3f, 0x0c, 0xac, 0x6b, 0xdb, 0x8b, 0x5d, 0xd0, 0x69, 0xb1, 0xf0, 0x05, + 0x80, 0x9c, 0x50, 0x77, 0xc4, 0x1c, 0x5d, 0x7e, 0x71, 0x41, 0x00, 0xad, 0xef, 0x56, 0x18, 0x58, + 0x10, 0xcd, 0x59, 0x4f, 0x02, 0xeb, 0xea, 0x3c, 0xaa, 0xcb, 0x63, 0x01, 0x17, 0x1c, 0x81, 0xf5, + 0xe1, 0xd4, 0xa4, 0x10, 0x66, 0x45, 0x77, 0xc8, 0x9d, 0x4c, 0x1d, 0x32, 0x3d, 0x65, 0x9a, 0xd7, + 0xe2, 0xee, 0x58, 0x9f, 0xc6, 0x05, 0x9a, 0x4d, 0x52, 0x3f, 0x32, 0xc0, 0x8d, 0x99, 0xc9, 0x11, + 0x75, 0xaa, 0x1f, 0x91, 0xc3, 0x17, 0xa0, 0xa8, 0x0a, 0xa2, 0x83, 0x25, 0xd6, 0xa3, 0xa4, 0xbc, + 0x75, 0x2b, 0x5b, 0xf9, 0x44, 0xb5, 0xb2, 0x47, 0x24, 0x4e, 0xc7, 0x57, 0x8a, 0xa1, 0x84, 0x15, + 0x3e, 0x05, 0xc5, 0x38, 0xb3, 0x30, 0x97, 0xb5, 0xe6, 0xbb, 0xd9, 0x34, 0x4f, 0x5f, 0xbb, 0x99, + 0x57, 0x59, 0x50, 0xc2, 0x55, 0xff, 0xc7, 0x00, 0xb5, 0xb3, 0xa4, 0xed, 0x52, 0x21, 0xe1, 0xf3, + 0x39, 0x79, 0x8d, 0x8c, 0xdd, 0x41, 0x45, 0x24, 0xee, 0x4a, 0x2c, 0xae, 0x38, 0x46, 0x26, 0xa4, + 0x75, 0x41, 0x81, 0x4a, 0x32, 0x1c, 0xeb, 0xda, 0x3e, 0x8f, 0xae, 0xa9, 0x3b, 0xa7, 0x73, 0x6f, + 0x47, 0xf1, 0xa2, 0x88, 0xbe, 0xfe, 0xbb, 
0x01, 0xf2, 0x6a, 0x10, 0xc2, 0x8f, 0x40, 0x09, 0x7b, + 0xf4, 0x6b, 0xce, 0x7c, 0x4f, 0x98, 0x86, 0xae, 0xf8, 0xd5, 0x30, 0xb0, 0x4a, 0xdb, 0xfb, 0x3b, + 0x11, 0x88, 0x52, 0x3b, 0xbc, 0x0d, 0xca, 0xd8, 0xa3, 0x49, 0x83, 0x2c, 0x6b, 0xf7, 0x75, 0xd5, + 0xae, 0xdb, 0xfb, 0x3b, 0x49, 0x53, 0x4c, 0xfa, 0x28, 0x7e, 0x4e, 0x04, 0xf3, 0xb9, 0x13, 0x8f, + 0xf0, 0x98, 0x1f, 0x8d, 0x41, 0x94, 0xda, 0xe1, 0xc7, 0xa0, 0x20, 0x1c, 0xe6, 0x91, 0x78, 0x0a, + 0x5f, 0x55, 0xd7, 0x3e, 0x50, 0xc0, 0x49, 0x60, 0x95, 0xf4, 0x1f, 0xba, 0x1d, 0x22, 0xa7, 0xfa, + 0x2f, 0x06, 0x80, 0xf3, 0x83, 0x1e, 0x7e, 0x01, 0x00, 0x4b, 0x4e, 0xb1, 0x24, 0x4b, 0xd7, 0x52, + 0x82, 0x9e, 0x04, 0xd6, 0x6a, 0x72, 0xd2, 0x94, 0x13, 0x21, 0xf0, 0x1b, 0x90, 0x57, 0xcb, 0x21, + 0xde, 0x6e, 0x1f, 0x66, 0x5e, 0x38, 0xe9, 0xca, 0x54, 0x27, 0xa4, 0x49, 0xea, 0x3f, 0x1b, 0xe0, + 0xca, 0x01, 0xe1, 0x23, 0xea, 0x10, 0x44, 0xba, 0x84, 0x13, 0xd7, 0x21, 0xd0, 0x06, 0xa5, 0x64, + 0xf8, 0xc6, 0xeb, 0x76, 0x23, 0x8e, 0x2d, 0x25, 0x83, 0x1a, 0xa5, 0x3e, 0xc9, 0x6a, 0x5e, 0x3e, + 0x75, 0x35, 0xdf, 0x00, 0x79, 0x0f, 0xcb, 0xbe, 0x99, 0xd3, 0x1e, 0x45, 0x65, 0xdd, 0xc7, 0xb2, + 0x8f, 0x34, 0xaa, 0xad, 0x8c, 0x4b, 0xfd, 0xae, 0x85, 0xd8, 0xca, 0xb8, 0x44, 0x1a, 0xad, 0xff, + 0xb1, 0x02, 0x36, 0x9e, 0xe2, 0x01, 0xed, 0x5c, 0x7e, 0x0e, 0x5c, 0x7e, 0x0e, 0x9c, 0xf9, 0x39, + 0x00, 0x2e, 0x3f, 0x07, 0xce, 0xf5, 0x39, 0xb0, 0x60, 0x59, 0x97, 0x2f, 0x62, 0x59, 0xff, 0x69, + 0x80, 0xea, 0x5c, 0x67, 0x5f, 0xf4, 0xba, 0xfe, 0x76, 0x6e, 0x5d, 0xdf, 0xcb, 0xa4, 0x7a, 0xee, + 0xe2, 0x73, 0x0b, 0xfb, 0x5f, 0x03, 0xd4, 0xcf, 0x96, 0x77, 0x01, 0x2b, 0xbb, 0x3f, 0xbd, 0xb2, + 0x5b, 0xe7, 0xd3, 0x96, 0x65, 0x69, 0xff, 0x66, 0x80, 0xb7, 0x17, 0xcc, 0x4d, 0xf8, 0x1e, 0xc8, + 0xf9, 0x7c, 0x10, 0x8f, 0xfe, 0x95, 0x30, 0xb0, 0x72, 0x4f, 0xd0, 0x2e, 0x52, 0x18, 0x7c, 0x0e, + 0x56, 0x44, 0xb4, 0x7d, 0x62, 0xe5, 0x9f, 0x66, 0xba, 0xde, 0xec, 0xc6, 0x6a, 0x96, 0xc3, 0xc0, + 0x5a, 0x19, 0xa3, 0x63, 0x4a, 0xb8, 0x09, 0x8a, 0x0e, 0x6e, 0xfa, 0x6e, 0x27, 0xde, 0x96, 0x95, + 0x66, 0x45, 0x3d, 0x52, 0x6b, 0x3b, 0xc2, 0x50, 0x62, 0x6d, 0xee, 0xbc, 0x3e, 0xae, 0x2e, 0xbd, + 0x39, 0xae, 0x2e, 0x1d, 0x1d, 0x57, 0x97, 0x7e, 0x0c, 0xab, 0xc6, 0xeb, 0xb0, 0x6a, 0xbc, 0x09, + 0xab, 0xc6, 0x51, 0x58, 0x35, 0xfe, 0x0a, 0xab, 0xc6, 0x4f, 0x7f, 0x57, 0x97, 0xbe, 0xbb, 0x99, + 0xe1, 0xbf, 0x04, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xe1, 0x3a, 0x73, 0x64, 0x10, 0x00, 0x00, } +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -435,6 +501,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -791,6 +871,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -1036,6 +1130,19 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MutatingWebhook) Size() (n int) { if m == nil { return 0 @@ -1085,6 +1192,12 @@ func (m *MutatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1235,6 +1348,12 @@ func (m *ValidatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1299,6 +1418,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} func (this *MutatingWebhook) String() string { if this == nil { return "nil" @@ -1308,6 +1438,11 @@ func (this *MutatingWebhook) String() string { repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&MutatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1320,6 +1455,7 @@ func (this *MutatingWebhook) String() string { `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1402,6 +1538,11 @@ func (this *ValidatingWebhook) 
String() string { repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&ValidatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1413,6 +1554,7 @@ func (this *ValidatingWebhook) String() string { `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1469,6 +1611,120 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1853,6 +2109,40 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2920,6 +3210,40 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto index aa266a2a..cdf1f476 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -28,6 +28,35 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1"; +// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. 
+ // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. @@ -173,6 +202,28 @@ message MutatingWebhook { // Defaults to "Never". // +optional optional string reinvocationPolicy = 10; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 12; } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. @@ -409,6 +460,28 @@ message ValidatingWebhook { // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. repeated string admissionReviewVersions = 8; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. 
+ // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 11; } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go index e74b276f..74f17d54 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -307,6 +307,28 @@ type ValidatingWebhook struct { // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,opt,name=matchConditions"` } // MutatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -454,6 +476,28 @@ type MutatingWebhook struct { // Defaults to "Never". // +optional ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. 
+ // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,opt,name=matchConditions"` } // ReinvocationPolicyType specifies what type of policy the admission hook uses. @@ -563,3 +607,32 @@ type ServiceReference struct { // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } + +// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. +type MatchCondition struct { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go index ba92729c..ce306b30 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MatchCondition = map[string]string{ + "": "MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.", + "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. 
Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "expression": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", +} + +func (MatchCondition) SwaggerDoc() map[string]string { + return map_MatchCondition +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -40,6 +50,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. 
* to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -111,6 +122,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (ValidatingWebhook) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go index cff7377a..b9560991 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -26,6 +26,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -77,6 +93,11 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = new(ReinvocationPolicyType) **out = **in } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -286,6 +307,11 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index a00f532d..74653502 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -45,10 +45,94 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } +func (*AuditAnnotation) ProtoMessage() {} +func (*AuditAnnotation) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{0} +} +func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditAnnotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditAnnotation.Merge(m, src) +} +func (m *AuditAnnotation) XXX_Size() int { + return m.Size() +} +func (m *AuditAnnotation) XXX_DiscardUnknown() { + xxx_messageInfo_AuditAnnotation.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo + +func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } +func (*ExpressionWarning) ProtoMessage() {} +func (*ExpressionWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{1} +} +func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExpressionWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExpressionWarning.Merge(m, src) +} +func (m *ExpressionWarning) XXX_Size() int { + return m.Size() +} +func (m *ExpressionWarning) XXX_DiscardUnknown() { + xxx_messageInfo_ExpressionWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo + +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{2} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + func (m *MatchResources) Reset() { *m = MatchResources{} } func (*MatchResources) ProtoMessage() {} func (*MatchResources) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{0} + return fileDescriptor_c3be8d256e3ae3cf, []int{3} } func (m *MatchResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +160,7 @@ var xxx_messageInfo_MatchResources proto.InternalMessageInfo func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } func (*NamedRuleWithOperations) ProtoMessage() {} func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{1} + return fileDescriptor_c3be8d256e3ae3cf, []int{4} } func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -104,7 +188,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo func (m 
*ParamKind) Reset() { *m = ParamKind{} } func (*ParamKind) ProtoMessage() {} func (*ParamKind) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{2} + return fileDescriptor_c3be8d256e3ae3cf, []int{5} } func (m *ParamKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +216,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo func (m *ParamRef) Reset() { *m = ParamRef{} } func (*ParamRef) ProtoMessage() {} func (*ParamRef) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{3} + return fileDescriptor_c3be8d256e3ae3cf, []int{6} } func (m *ParamRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -157,10 +241,38 @@ func (m *ParamRef) XXX_DiscardUnknown() { var xxx_messageInfo_ParamRef proto.InternalMessageInfo +func (m *TypeChecking) Reset() { *m = TypeChecking{} } +func (*TypeChecking) ProtoMessage() {} +func (*TypeChecking) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{7} +} +func (m *TypeChecking) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeChecking) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeChecking.Merge(m, src) +} +func (m *TypeChecking) XXX_Size() int { + return m.Size() +} +func (m *TypeChecking) XXX_DiscardUnknown() { + xxx_messageInfo_TypeChecking.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeChecking proto.InternalMessageInfo + func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } func (*ValidatingAdmissionPolicy) ProtoMessage() {} func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{4} + return fileDescriptor_c3be8d256e3ae3cf, []int{8} } func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +300,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{5} + return fileDescriptor_c3be8d256e3ae3cf, []int{9} } func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +328,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{6} + return fileDescriptor_c3be8d256e3ae3cf, []int{10} } func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +356,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{7} + return fileDescriptor_c3be8d256e3ae3cf, []int{11} } func (m 
*ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -272,7 +384,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } func (*ValidatingAdmissionPolicyList) ProtoMessage() {} func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{8} + return fileDescriptor_c3be8d256e3ae3cf, []int{12} } func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -300,7 +412,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{9} + return fileDescriptor_c3be8d256e3ae3cf, []int{13} } func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -325,10 +437,38 @@ func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo +func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } +func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} +func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{14} +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo + func (m *Validation) Reset() { *m = Validation{} } func (*Validation) ProtoMessage() {} func (*Validation) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{10} + return fileDescriptor_c3be8d256e3ae3cf, []int{15} } func (m *Validation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,16 +494,21 @@ func (m *Validation) XXX_DiscardUnknown() { var xxx_messageInfo_Validation proto.InternalMessageInfo func init() { + proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1alpha1.AuditAnnotation") + proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExpressionWarning") + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchCondition") proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchResources") proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.NamedRuleWithOperations") proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamKind") proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamRef") + proto.RegisterType((*TypeChecking)(nil), 
"k8s.io.api.admissionregistration.v1alpha1.TypeChecking") proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy") proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding") proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingList") proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec") proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyList") proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec") + proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus") proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Validation") } @@ -372,73 +517,194 @@ func init() { } var fileDescriptor_c3be8d256e3ae3cf = []byte{ - // 1054 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcd, 0x6e, 0x1c, 0x45, - 0x10, 0xf6, 0xc4, 0x9b, 0xc4, 0xdb, 0x1b, 0x3b, 0x76, 0xe3, 0x88, 0xc5, 0x82, 0xdd, 0xd5, 0x2a, - 0x42, 0xf6, 0x81, 0x19, 0xec, 0x04, 0x02, 0x27, 0x94, 0x21, 0x41, 0x44, 0xb1, 0x63, 0xab, 0x8d, - 0x12, 0x09, 0x11, 0x89, 0xf6, 0x4c, 0x7b, 0xb6, 0xb3, 0x3b, 0x3f, 0x4c, 0xf7, 0x58, 0xb6, 0x38, - 0x80, 0xc4, 0x0b, 0x70, 0xe0, 0x41, 0x38, 0x71, 0xe1, 0x05, 0x7c, 0xcc, 0xd1, 0x5c, 0x46, 0x78, - 0xb8, 0xc0, 0x0b, 0x80, 0xe4, 0x13, 0xea, 0x9e, 0x9e, 0xbf, 0xfd, 0xc1, 0xeb, 0x60, 0xe5, 0xb6, - 0x5d, 0x3f, 0xdf, 0x57, 0x55, 0x5d, 0x35, 0xd5, 0x0b, 0x50, 0xff, 0x23, 0xa6, 0x53, 0xdf, 0xe8, - 0x47, 0x7b, 0x24, 0xf4, 0x08, 0x27, 0xcc, 0x38, 0x20, 0x9e, 0xed, 0x87, 0x86, 0x52, 0xe0, 0x80, - 0x1a, 0xd8, 0x76, 0x29, 0x63, 0xd4, 0xf7, 0x42, 0xe2, 0x50, 0xc6, 0x43, 0xcc, 0xa9, 0xef, 0x19, - 0x07, 0xeb, 0x78, 0x10, 0xf4, 0xf0, 0xba, 0xe1, 0x10, 0x8f, 0x84, 0x98, 0x13, 0x5b, 0x0f, 0x42, - 0x9f, 0xfb, 0x70, 0x2d, 0x75, 0xd5, 0x71, 0x40, 0xf5, 0xb1, 0xae, 0x7a, 0xe6, 0xba, 0xf2, 0x9e, - 0x43, 0x79, 0x2f, 0xda, 0xd3, 0x2d, 0xdf, 0x35, 0x1c, 0xdf, 0xf1, 0x0d, 0x89, 0xb0, 0x17, 0xed, - 0xcb, 0x93, 0x3c, 0xc8, 0x5f, 0x29, 0xf2, 0xca, 0x9d, 0x29, 0x82, 0x1a, 0x0e, 0x67, 0xe5, 0x6e, - 0xe1, 0xe4, 0x62, 0xab, 0x47, 0x3d, 0x12, 0x1e, 0x19, 0x41, 0xdf, 0x11, 0x02, 0x66, 0xb8, 0x84, - 0xe3, 0x71, 0x5e, 0xc6, 0x24, 0xaf, 0x30, 0xf2, 0x38, 0x75, 0xc9, 0x88, 0xc3, 0x87, 0xe7, 0x39, - 0x30, 0xab, 0x47, 0x5c, 0x3c, 0xec, 0xd7, 0xfd, 0xad, 0x06, 0x16, 0xb6, 0x30, 0xb7, 0x7a, 0x88, - 0x30, 0x3f, 0x0a, 0x2d, 0xc2, 0xe0, 0x21, 0x58, 0xf2, 0xb0, 0x4b, 0x58, 0x80, 0x2d, 0xb2, 0x4b, - 0x06, 0xc4, 0xe2, 0x7e, 0xd8, 0xd4, 0x3a, 0xda, 0x6a, 0x63, 0xe3, 0x8e, 0x5e, 0x14, 0x37, 0xa7, - 0xd1, 0x83, 0xbe, 0x23, 0x04, 0x4c, 0x17, 0xd9, 0xe8, 0x07, 0xeb, 0xfa, 0x26, 0xde, 0x23, 0x83, - 0xcc, 0xd5, 0xbc, 0x95, 0xc4, 0xed, 0xa5, 0x27, 0xc3, 0x88, 0x68, 0x94, 0x04, 0xfa, 0x60, 0xc1, - 0xdf, 0x7b, 0x41, 0x2c, 0x9e, 0xd3, 0x5e, 0x79, 0x75, 0x5a, 0x98, 0xc4, 0xed, 0x85, 0xed, 0x0a, - 0x1c, 0x1a, 0x82, 0x87, 0xdf, 0x81, 0xf9, 0x50, 0xe5, 0x8d, 0xa2, 0x01, 0x61, 0xcd, 0xd9, 0xce, - 0xec, 0x6a, 0x63, 0xc3, 0xd4, 0xa7, 0xee, 0x21, 0x5d, 0x24, 0x66, 0x0b, 0xe7, 0x67, 0x94, 0xf7, - 0xb6, 0x03, 0x92, 0xea, 0x99, 0x79, 0xeb, 
0x38, 0x6e, 0xcf, 0x24, 0x71, 0x7b, 0x1e, 0x95, 0x09, - 0x50, 0x95, 0x0f, 0xfe, 0xa4, 0x81, 0x65, 0x72, 0x68, 0x0d, 0x22, 0x9b, 0x54, 0xec, 0x9a, 0xb5, - 0x4b, 0x0b, 0xe4, 0x6d, 0x15, 0xc8, 0xf2, 0xc3, 0x31, 0x3c, 0x68, 0x2c, 0x3b, 0x7c, 0x00, 0x1a, - 0xae, 0x68, 0x8a, 0x1d, 0x7f, 0x40, 0xad, 0xa3, 0xe6, 0xf5, 0x8e, 0xb6, 0x5a, 0x37, 0xbb, 0x49, - 0xdc, 0x6e, 0x6c, 0x15, 0xe2, 0xb3, 0xb8, 0x7d, 0xb3, 0x74, 0xfc, 0xe2, 0x28, 0x20, 0xa8, 0xec, - 0xd6, 0x3d, 0xd1, 0xc0, 0x9b, 0x13, 0xa2, 0x82, 0xf7, 0x8a, 0xca, 0xcb, 0xd6, 0x68, 0x6a, 0x9d, - 0xd9, 0xd5, 0xba, 0xb9, 0x54, 0xae, 0x98, 0x54, 0xa0, 0xaa, 0x1d, 0xfc, 0x41, 0x03, 0x30, 0x1c, - 0xc1, 0x53, 0x8d, 0x72, 0x6f, 0x9a, 0x7a, 0xe9, 0x63, 0x8a, 0xb4, 0xa2, 0x8a, 0x04, 0x47, 0x75, - 0x68, 0x0c, 0x5d, 0x17, 0x83, 0xfa, 0x0e, 0x0e, 0xb1, 0xfb, 0x98, 0x7a, 0x36, 0xdc, 0x00, 0x00, - 0x07, 0xf4, 0x29, 0x09, 0x05, 0x99, 0x9c, 0x94, 0xba, 0x09, 0x15, 0x20, 0xb8, 0xbf, 0xf3, 0x48, - 0x69, 0x50, 0xc9, 0x0a, 0x76, 0x40, 0xad, 0x4f, 0x3d, 0x5b, 0xc6, 0x5d, 0x37, 0x6f, 0x28, 0xeb, - 0x9a, 0xc0, 0x43, 0x52, 0xd3, 0x7d, 0x0e, 0xe6, 0x24, 0x05, 0x22, 0xfb, 0xc2, 0x5a, 0x4c, 0x8b, - 0xc2, 0xce, 0xad, 0x45, 0x45, 0x90, 0xd4, 0x40, 0x03, 0xd4, 0xf3, 0x79, 0x52, 0xa0, 0x4b, 0xca, - 0xac, 0x9e, 0xcf, 0x1e, 0x2a, 0x6c, 0xba, 0x7f, 0x69, 0xe0, 0xad, 0xa7, 0x78, 0x40, 0x6d, 0xcc, - 0xa9, 0xe7, 0xdc, 0xcf, 0x6a, 0x95, 0x5e, 0x1d, 0xfc, 0x1a, 0xcc, 0x89, 0xa9, 0xb2, 0x31, 0xc7, - 0x6a, 0xf4, 0xdf, 0x9f, 0x6e, 0x06, 0xd3, 0x81, 0xdb, 0x22, 0x1c, 0x17, 0x25, 0x28, 0x64, 0x28, - 0x47, 0x85, 0x2f, 0x40, 0x8d, 0x05, 0xc4, 0x52, 0x17, 0xf7, 0xf9, 0x05, 0x1a, 0x7d, 0x62, 0xd4, - 0xbb, 0x01, 0xb1, 0x8a, 0xe2, 0x88, 0x13, 0x92, 0x1c, 0xdd, 0x7f, 0x34, 0xd0, 0x99, 0xe8, 0x65, - 0x52, 0xcf, 0xa6, 0x9e, 0xf3, 0x1a, 0x52, 0xfe, 0xa6, 0x92, 0xf2, 0xf6, 0x65, 0xa4, 0xac, 0x82, - 0x9f, 0x98, 0xf9, 0xdf, 0x1a, 0xb8, 0x7d, 0x9e, 0xf3, 0x26, 0x65, 0x1c, 0x7e, 0x35, 0x92, 0xbd, - 0x3e, 0xe5, 0x47, 0x97, 0xb2, 0x34, 0xf7, 0x45, 0x45, 0x3f, 0x97, 0x49, 0x4a, 0x99, 0x07, 0xe0, - 0x2a, 0xe5, 0xc4, 0x15, 0x63, 0x2a, 0x3e, 0x6b, 0x8f, 0x2f, 0x31, 0x75, 0x73, 0x5e, 0xf1, 0x5e, - 0x7d, 0x24, 0x18, 0x50, 0x4a, 0xd4, 0xfd, 0xf9, 0xca, 0xf9, 0x89, 0x8b, 0x3a, 0x89, 0xe1, 0x0d, - 0xa4, 0xf0, 0x49, 0x31, 0x60, 0xf9, 0x35, 0xee, 0xe4, 0x1a, 0x54, 0xb2, 0x82, 0xcf, 0xc1, 0x5c, - 0xa0, 0x46, 0x73, 0xcc, 0x86, 0x3a, 0x2f, 0xa3, 0x6c, 0xaa, 0xcd, 0x1b, 0xa2, 0x5a, 0xd9, 0x09, - 0xe5, 0x90, 0x30, 0x02, 0x0b, 0x6e, 0x65, 0x25, 0x37, 0x67, 0x25, 0xc9, 0xc7, 0x17, 0x20, 0xa9, - 0xee, 0xf4, 0x74, 0x19, 0x56, 0x65, 0x68, 0x88, 0xa4, 0xfb, 0xa7, 0x06, 0xde, 0x99, 0x58, 0xb2, - 0xd7, 0xd0, 0x24, 0xb4, 0xda, 0x24, 0x0f, 0x2e, 0xa5, 0x49, 0xc6, 0x77, 0xc7, 0xaf, 0xb3, 0xff, - 0x91, 0xaa, 0x6c, 0x0b, 0x0c, 0xea, 0x41, 0xf6, 0x81, 0x57, 0xb9, 0xde, 0xbd, 0xe8, 0x1d, 0x0b, - 0x5f, 0x73, 0x5e, 0x7c, 0x81, 0xf3, 0x23, 0x2a, 0x50, 0xe1, 0xb7, 0x60, 0x51, 0xde, 0xc0, 0xa7, - 0xbe, 0x27, 0x00, 0xa8, 0xc7, 0xb3, 0x35, 0xf6, 0x3f, 0x2e, 0x7a, 0x39, 0x89, 0xdb, 0x8b, 0x5b, - 0x43, 0xb0, 0x68, 0x84, 0x08, 0x0e, 0x40, 0xe3, 0x40, 0x15, 0x40, 0xac, 0xcf, 0xf4, 0xdd, 0xf3, - 0xc1, 0x2b, 0x94, 0xdc, 0xf7, 0xcc, 0x37, 0x54, 0x8d, 0x1b, 0x85, 0x8c, 0xa1, 0x32, 0x3c, 0xdc, - 0x04, 0xf3, 0xfb, 0x98, 0x0e, 0xa2, 0x90, 0xa8, 0x17, 0x45, 0x4d, 0xce, 0xd9, 0xbb, 0x62, 0xdb, - 0x7f, 0x56, 0x56, 0x9c, 0xc5, 0xed, 0xa5, 0x8a, 0x40, 0xbe, 0x2a, 0xaa, 0xce, 0xdd, 0x5f, 0x34, - 0x00, 0x0a, 0x2a, 0x78, 0x1b, 0x80, 0x87, 0x87, 0x41, 0x48, 0x58, 0x69, 0xfd, 0xd6, 0x44, 0x48, - 0xa8, 0x24, 0x87, 0x6b, 0xe0, 0xba, 0x4b, 0x18, 0xc3, 0x4e, 0xb6, 
0x1e, 0x6f, 0xaa, 0xa8, 0xaf, - 0x6f, 0xa5, 0x62, 0x94, 0xe9, 0xe1, 0x33, 0x70, 0x2d, 0x24, 0x98, 0xf9, 0x9e, 0x9c, 0xbb, 0xba, - 0xf9, 0x49, 0x12, 0xb7, 0xaf, 0x21, 0x29, 0x39, 0x8b, 0xdb, 0xeb, 0xd3, 0x3c, 0xe8, 0xf5, 0x5d, - 0x8e, 0x79, 0xc4, 0x52, 0x27, 0xa4, 0xe0, 0xcc, 0xed, 0xe3, 0xd3, 0xd6, 0xcc, 0xcb, 0xd3, 0xd6, - 0xcc, 0xc9, 0x69, 0x6b, 0xe6, 0xfb, 0xa4, 0xa5, 0x1d, 0x27, 0x2d, 0xed, 0x65, 0xd2, 0xd2, 0x4e, - 0x92, 0x96, 0xf6, 0x7b, 0xd2, 0xd2, 0x7e, 0xfc, 0xa3, 0x35, 0xf3, 0xe5, 0xda, 0xd4, 0xff, 0x7d, - 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x20, 0xc8, 0x63, 0x1d, 0x40, 0x0d, 0x00, 0x00, + // 1407 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0x45, + 0x18, 0xcf, 0xc6, 0x4e, 0x9a, 0x8c, 0xf3, 0xb0, 0x87, 0x56, 0x75, 0x23, 0x6a, 0x47, 0xab, 0x0a, + 0x35, 0x12, 0xec, 0x92, 0xb4, 0x50, 0x40, 0x48, 0x28, 0xdb, 0x17, 0x7d, 0xa4, 0x89, 0xa6, 0x28, + 0x91, 0x10, 0x95, 0x98, 0xec, 0x4e, 0xec, 0xa9, 0xbd, 0x0f, 0x76, 0xd6, 0xa1, 0x11, 0x48, 0x54, + 0xe2, 0x02, 0x37, 0x0e, 0x5c, 0xf8, 0x5f, 0xb8, 0x70, 0xeb, 0xb1, 0xc7, 0x72, 0xc0, 0x22, 0xe6, + 0xc2, 0x5f, 0x00, 0x52, 0x2e, 0xa0, 0x99, 0x9d, 0x7d, 0x3b, 0xc4, 0x2e, 0x81, 0x9b, 0xf7, 0x7b, + 0xfc, 0x7e, 0xf3, 0x7d, 0xf3, 0x7d, 0x33, 0xdf, 0x18, 0xa0, 0xce, 0x3b, 0x4c, 0xa3, 0xae, 0xde, + 0xe9, 0xed, 0x12, 0xdf, 0x21, 0x01, 0x61, 0xfa, 0x3e, 0x71, 0x2c, 0xd7, 0xd7, 0xa5, 0x02, 0x7b, + 0x54, 0xc7, 0x96, 0x4d, 0x19, 0xa3, 0xae, 0xe3, 0x93, 0x16, 0x65, 0x81, 0x8f, 0x03, 0xea, 0x3a, + 0xfa, 0xfe, 0x2a, 0xee, 0x7a, 0x6d, 0xbc, 0xaa, 0xb7, 0x88, 0x43, 0x7c, 0x1c, 0x10, 0x4b, 0xf3, + 0x7c, 0x37, 0x70, 0xe1, 0x4a, 0xe8, 0xaa, 0x61, 0x8f, 0x6a, 0x43, 0x5d, 0xb5, 0xc8, 0x75, 0xe9, + 0x8d, 0x16, 0x0d, 0xda, 0xbd, 0x5d, 0xcd, 0x74, 0x6d, 0xbd, 0xe5, 0xb6, 0x5c, 0x5d, 0x20, 0xec, + 0xf6, 0xf6, 0xc4, 0x97, 0xf8, 0x10, 0xbf, 0x42, 0xe4, 0xa5, 0x2b, 0x23, 0x2c, 0x2a, 0xbf, 0x9c, + 0xa5, 0xab, 0x89, 0x93, 0x8d, 0xcd, 0x36, 0x75, 0x88, 0x7f, 0xa0, 0x7b, 0x9d, 0x16, 0x17, 0x30, + 0xdd, 0x26, 0x01, 0x1e, 0xe6, 0xa5, 0x1f, 0xe7, 0xe5, 0xf7, 0x9c, 0x80, 0xda, 0xa4, 0xe0, 0xf0, + 0xf6, 0x49, 0x0e, 0xcc, 0x6c, 0x13, 0x1b, 0xe7, 0xfd, 0x54, 0x06, 0x16, 0xd7, 0x7b, 0x16, 0x0d, + 0xd6, 0x1d, 0xc7, 0x0d, 0x44, 0x10, 0xf0, 0x22, 0x28, 0x75, 0xc8, 0x41, 0x5d, 0x59, 0x56, 0x2e, + 0xcf, 0x1a, 0x95, 0x67, 0xfd, 0xe6, 0xc4, 0xa0, 0xdf, 0x2c, 0xdd, 0x23, 0x07, 0x88, 0xcb, 0xe1, + 0x3a, 0x58, 0xdc, 0xc7, 0xdd, 0x1e, 0xb9, 0xf9, 0xc4, 0xf3, 0x89, 0x48, 0x41, 0x7d, 0x52, 0x98, + 0x9e, 0x97, 0xa6, 0x8b, 0xdb, 0x59, 0x35, 0xca, 0xdb, 0xab, 0x5d, 0x50, 0x4b, 0xbe, 0x76, 0xb0, + 0xef, 0x50, 0xa7, 0x05, 0x5f, 0x07, 0x33, 0x7b, 0x94, 0x74, 0x2d, 0x44, 0xf6, 0x24, 0x60, 0x55, + 0x02, 0xce, 0xdc, 0x92, 0x72, 0x14, 0x5b, 0xc0, 0x15, 0x70, 0xe6, 0xf3, 0xd0, 0xb1, 0x5e, 0x12, + 0xc6, 0x8b, 0xd2, 0xf8, 0x8c, 0xc4, 0x43, 0x91, 0x5e, 0xdd, 0x03, 0x0b, 0x1b, 0x38, 0x30, 0xdb, + 0xd7, 0x5d, 0xc7, 0xa2, 0x22, 0xc2, 0x65, 0x50, 0x76, 0xb0, 0x4d, 0x64, 0x88, 0x73, 0xd2, 0xb3, + 0xfc, 0x00, 0xdb, 0x04, 0x09, 0x0d, 0x5c, 0x03, 0x80, 0xe4, 0xe3, 0x83, 0xd2, 0x0e, 0xa4, 0x42, + 0x4b, 0x59, 0xa9, 0x3f, 0x97, 0x25, 0x11, 0x22, 0xcc, 0xed, 0xf9, 0x26, 0x61, 0xf0, 0x09, 0xa8, + 0x71, 0x38, 0xe6, 0x61, 0x93, 0x3c, 0x24, 0x5d, 0x62, 0x06, 0xae, 0x2f, 0x58, 0x2b, 0x6b, 0x57, + 0xb4, 0xa4, 0x4e, 0xe3, 0x1d, 0xd3, 0xbc, 0x4e, 0x8b, 0x0b, 0x98, 0xc6, 0x0b, 0x43, 0xdb, 0x5f, + 0xd5, 0xee, 0xe3, 0x5d, 0xd2, 0x8d, 0x5c, 0x8d, 0x73, 0x83, 0x7e, 0xb3, 0xf6, 0x20, 0x8f, 0x88, + 0x8a, 0x24, 0xd0, 0x05, 0x0b, 0xee, 0xee, 0x63, 0x62, 
0x06, 0x31, 0xed, 0xe4, 0xcb, 0xd3, 0xc2, + 0x41, 0xbf, 0xb9, 0xb0, 0x99, 0x81, 0x43, 0x39, 0x78, 0xf8, 0x15, 0x98, 0xf7, 0x65, 0xdc, 0xa8, + 0xd7, 0x25, 0xac, 0x5e, 0x5a, 0x2e, 0x5d, 0xae, 0xac, 0x19, 0xda, 0xc8, 0xed, 0xa8, 0xf1, 0xc0, + 0x2c, 0xee, 0xbc, 0x43, 0x83, 0xf6, 0xa6, 0x47, 0x42, 0x3d, 0x33, 0xce, 0xc9, 0xc4, 0xcf, 0xa3, + 0x34, 0x01, 0xca, 0xf2, 0xc1, 0xef, 0x15, 0x70, 0x96, 0x3c, 0x31, 0xbb, 0x3d, 0x8b, 0x64, 0xec, + 0xea, 0xe5, 0x53, 0x5b, 0xc8, 0xab, 0x72, 0x21, 0x67, 0x6f, 0x0e, 0xe1, 0x41, 0x43, 0xd9, 0xe1, + 0x0d, 0x50, 0xb1, 0x79, 0x51, 0x6c, 0xb9, 0x5d, 0x6a, 0x1e, 0xd4, 0xcf, 0x88, 0x52, 0x52, 0x07, + 0xfd, 0x66, 0x65, 0x23, 0x11, 0x1f, 0xf5, 0x9b, 0x8b, 0xa9, 0xcf, 0x8f, 0x0e, 0x3c, 0x82, 0xd2, + 0x6e, 0xea, 0x0b, 0x05, 0x9c, 0x3f, 0x66, 0x55, 0xf0, 0x5a, 0x92, 0x79, 0x51, 0x1a, 0x75, 0x65, + 0xb9, 0x74, 0x79, 0xd6, 0xa8, 0xa5, 0x33, 0x26, 0x14, 0x28, 0x6b, 0x07, 0xbf, 0x56, 0x00, 0xf4, + 0x0b, 0x78, 0xb2, 0x50, 0xae, 0x8d, 0x92, 0x2f, 0x6d, 0x48, 0x92, 0x96, 0x64, 0x92, 0x60, 0x51, + 0x87, 0x86, 0xd0, 0xa9, 0x18, 0xcc, 0x6e, 0x61, 0x1f, 0xdb, 0xf7, 0xa8, 0x63, 0xf1, 0xbe, 0xc3, + 0x1e, 0xdd, 0x26, 0xbe, 0xe8, 0x3b, 0x25, 0xdb, 0x77, 0xeb, 0x5b, 0x77, 0xa4, 0x06, 0xa5, 0xac, + 0x78, 0x37, 0x77, 0xa8, 0x63, 0xc9, 0x2e, 0x8d, 0xbb, 0x99, 0xe3, 0x21, 0xa1, 0x51, 0x1f, 0x81, + 0x19, 0x41, 0xc1, 0x0f, 0x8e, 0x93, 0x7b, 0x5f, 0x07, 0xb3, 0x71, 0x3f, 0x49, 0xd0, 0x9a, 0x34, + 0x9b, 0x8d, 0x7b, 0x0f, 0x25, 0x36, 0xea, 0x0f, 0x0a, 0x98, 0xe3, 0x5b, 0x76, 0xbd, 0x4d, 0xcc, + 0x0e, 0x3f, 0xca, 0xbe, 0x51, 0x00, 0x24, 0xf9, 0x03, 0x2e, 0xdc, 0x97, 0xca, 0xda, 0xfb, 0x63, + 0x14, 0x62, 0xe1, 0x94, 0x4c, 0xb2, 0x5b, 0x50, 0x31, 0x34, 0x84, 0x53, 0xfd, 0x65, 0x12, 0x5c, + 0xd8, 0xc6, 0x5d, 0x6a, 0xe1, 0x80, 0x3a, 0xad, 0xf5, 0x88, 0x2e, 0x2c, 0x2b, 0xf8, 0x29, 0x98, + 0xe1, 0x1d, 0x6f, 0xe1, 0x00, 0xcb, 0x63, 0xe9, 0xcd, 0xd1, 0xce, 0x87, 0xf0, 0x30, 0xd8, 0x20, + 0x01, 0x4e, 0xb6, 0x27, 0x91, 0xa1, 0x18, 0x15, 0x3e, 0x06, 0x65, 0xe6, 0x11, 0x53, 0x16, 0xd5, + 0x87, 0x63, 0xc4, 0x7e, 0xec, 0xaa, 0x1f, 0x7a, 0xc4, 0x4c, 0x36, 0x8e, 0x7f, 0x21, 0xc1, 0x01, + 0x7d, 0x30, 0xcd, 0x02, 0x1c, 0xf4, 0x98, 0xb8, 0x12, 0x2a, 0x6b, 0x77, 0x4f, 0x85, 0x4d, 0x20, + 0x1a, 0x0b, 0x92, 0x6f, 0x3a, 0xfc, 0x46, 0x92, 0x49, 0xfd, 0x53, 0x01, 0xcb, 0xc7, 0xfa, 0x1a, + 0xd4, 0xb1, 0x78, 0x3d, 0xfc, 0xf7, 0x69, 0xfe, 0x2c, 0x93, 0xe6, 0xcd, 0xd3, 0x08, 0x5c, 0x2e, + 0xfe, 0xb8, 0x6c, 0xab, 0x7f, 0x28, 0xe0, 0xd2, 0x49, 0xce, 0xf7, 0x29, 0x0b, 0xe0, 0x27, 0x85, + 0xe8, 0xb5, 0x11, 0x2f, 0x21, 0xca, 0xc2, 0xd8, 0xe3, 0x41, 0x20, 0x92, 0xa4, 0x22, 0xf7, 0xc0, + 0x14, 0x0d, 0x88, 0xcd, 0x8f, 0x2d, 0xde, 0x5d, 0xf7, 0x4e, 0x31, 0x74, 0x63, 0x5e, 0xf2, 0x4e, + 0xdd, 0xe1, 0x0c, 0x28, 0x24, 0x52, 0xbf, 0x2d, 0x9d, 0x1c, 0x38, 0xcf, 0x13, 0x3f, 0xcc, 0x3c, + 0x21, 0x7c, 0x90, 0x1c, 0x38, 0xf1, 0x36, 0x6e, 0xc5, 0x1a, 0x94, 0xb2, 0x82, 0x8f, 0xc0, 0x8c, + 0x27, 0x8f, 0xaa, 0x21, 0x37, 0xf6, 0x49, 0x11, 0x45, 0xa7, 0x9c, 0x31, 0xc7, 0xb3, 0x15, 0x7d, + 0xa1, 0x18, 0x12, 0xf6, 0xc0, 0x82, 0x9d, 0x19, 0x51, 0x64, 0xab, 0xbc, 0x3b, 0x06, 0x49, 0x76, + 0xc6, 0x09, 0x87, 0x83, 0xac, 0x0c, 0xe5, 0x48, 0xe0, 0x0e, 0xa8, 0xed, 0xcb, 0x8c, 0xb9, 0xce, + 0xba, 0x19, 0xde, 0x33, 0x65, 0x71, 0x4d, 0xad, 0xf0, 0x91, 0x66, 0x3b, 0xaf, 0x3c, 0xea, 0x37, + 0xab, 0x79, 0x21, 0x2a, 0x62, 0xa8, 0xbf, 0x2b, 0xe0, 0xe2, 0xb1, 0x7b, 0xf1, 0x3f, 0x54, 0x1f, + 0xcd, 0x56, 0xdf, 0x8d, 0x53, 0xa9, 0xbe, 0xe1, 0x65, 0xf7, 0xe3, 0xd4, 0x3f, 0x84, 0x2a, 0xea, + 0x0d, 0x83, 0x59, 0x2f, 0xba, 0x49, 0x65, 0xac, 0x57, 0xc7, 0x2d, 0x1e, 0xee, 
0x6b, 0xcc, 0xf3, + 0xab, 0x2e, 0xfe, 0x44, 0x09, 0x2a, 0xfc, 0x02, 0x54, 0x6d, 0x39, 0x4b, 0x73, 0x00, 0xea, 0x04, + 0xd1, 0xbc, 0xf0, 0x2f, 0x2a, 0xe8, 0xec, 0xa0, 0xdf, 0xac, 0x6e, 0xe4, 0x60, 0x51, 0x81, 0x08, + 0x76, 0x41, 0x25, 0xa9, 0x80, 0x68, 0xc0, 0x7c, 0xeb, 0x25, 0x52, 0xee, 0x3a, 0xc6, 0x2b, 0x32, + 0xc7, 0x95, 0x44, 0xc6, 0x50, 0x1a, 0x1e, 0xde, 0x07, 0xf3, 0x7b, 0x98, 0x76, 0x7b, 0x3e, 0x91, + 0xa3, 0x5b, 0x59, 0x34, 0xf0, 0x6b, 0x7c, 0xac, 0xba, 0x95, 0x56, 0x1c, 0xf5, 0x9b, 0xb5, 0x8c, + 0x40, 0x8c, 0x6f, 0x59, 0x67, 0xf8, 0x54, 0x01, 0x55, 0x9c, 0x7d, 0x68, 0xb1, 0xfa, 0x94, 0x88, + 0xe0, 0xbd, 0x31, 0x22, 0xc8, 0xbd, 0xd5, 0x8c, 0xba, 0x0c, 0xa3, 0x9a, 0x53, 0x30, 0x54, 0x60, + 0x83, 0x5f, 0x82, 0x45, 0x3b, 0xf3, 0x0e, 0x62, 0xf5, 0x69, 0xb1, 0x80, 0xb1, 0xb7, 0x2e, 0x46, + 0x48, 0xde, 0x7c, 0x59, 0x39, 0x43, 0x79, 0x2a, 0xf5, 0xa7, 0x49, 0xd0, 0x3c, 0xe1, 0x92, 0x85, + 0x77, 0x01, 0x74, 0x77, 0x19, 0xf1, 0xf7, 0x89, 0x75, 0x3b, 0x7c, 0xa7, 0x46, 0x53, 0x60, 0x29, + 0x19, 0x7c, 0x36, 0x0b, 0x16, 0x68, 0x88, 0x17, 0xb4, 0xc1, 0x5c, 0x90, 0x9a, 0xc9, 0xc6, 0x99, + 0x6a, 0x65, 0xa8, 0xe9, 0x91, 0xce, 0xa8, 0x0e, 0xfa, 0xcd, 0xcc, 0x90, 0x87, 0x32, 0xf0, 0xd0, + 0x04, 0xc0, 0x4c, 0xf2, 0x1a, 0x96, 0xa6, 0x3e, 0xda, 0x41, 0x93, 0x64, 0x33, 0xbe, 0x1c, 0x52, + 0x89, 0x4c, 0xc1, 0xaa, 0x7f, 0x29, 0x00, 0x24, 0xf5, 0x0a, 0x2f, 0x81, 0xd4, 0x53, 0x54, 0xde, + 0x2f, 0x65, 0x0e, 0x81, 0x52, 0x72, 0xfe, 0x52, 0xb6, 0x09, 0x63, 0xb8, 0x15, 0x0d, 0xb3, 0xf1, + 0x4b, 0x79, 0x23, 0x14, 0xa3, 0x48, 0x0f, 0x77, 0xc0, 0xb4, 0x4f, 0x30, 0x73, 0x1d, 0xf9, 0xa6, + 0xfe, 0x80, 0x0f, 0x3c, 0x48, 0x48, 0x8e, 0xfa, 0xcd, 0xd5, 0x51, 0xfe, 0xc9, 0xd0, 0xe4, 0x7c, + 0x24, 0x9c, 0x90, 0x84, 0x83, 0xb7, 0x41, 0x4d, 0x72, 0xa4, 0x16, 0x1c, 0xf6, 0xd3, 0x05, 0xb9, + 0x9a, 0xda, 0x46, 0xde, 0x00, 0x15, 0x7d, 0x8c, 0xcd, 0x67, 0x87, 0x8d, 0x89, 0xe7, 0x87, 0x8d, + 0x89, 0x17, 0x87, 0x8d, 0x89, 0xa7, 0x83, 0x86, 0xf2, 0x6c, 0xd0, 0x50, 0x9e, 0x0f, 0x1a, 0xca, + 0x8b, 0x41, 0x43, 0xf9, 0x75, 0xd0, 0x50, 0xbe, 0xfb, 0xad, 0x31, 0xf1, 0xf1, 0xca, 0xc8, 0xff, + 0x1e, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x08, 0xaf, 0xaa, 0x52, 0x82, 0x12, 0x00, 0x00, +} + +func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditAnnotation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditAnnotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ValueExpression) + copy(dAtA[i:], m.ValueExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValueExpression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExpressionWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExpressionWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Warning))) + i-- + dAtA[i] = 0x1a + i -= len(m.FieldRef) + copy(dAtA[i:], m.FieldRef) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldRef))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MatchResources) Marshal() (dAtA []byte, err error) { @@ -631,6 +897,43 @@ func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -651,6 +954,16 @@ func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -784,6 +1097,15 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) _ = i var l int _ = l + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } if m.MatchResources != nil { { size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) @@ -883,6 +1205,34 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if m.FailurePolicy != nil { i -= len(*m.FailurePolicy) copy(dAtA[i:], *m.FailurePolicy) @@ -931,7 +1281,7 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *Validation) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -941,27 +1291,84 @@ func (m *Validation) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Validation) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Reason != nil { - i -= len(*m.Reason) - copy(dAtA[i:], *m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) - i-- - dAtA[i] = 0x1a - } - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- dAtA[i] = 0x12 i -= len(m.Expression) copy(dAtA[i:], m.Expression) @@ -982,6 +1389,45 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MatchResources) Size() (n int) { if m == nil { return 0 @@ -1058,6 +1504,21 @@ func (m *ParamRef) Size() (n int) { return n } +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ValidatingAdmissionPolicy) Size() (n int) { if m == nil { return 0 @@ -1068,6 +1529,8 @@ func (m *ValidatingAdmissionPolicy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1117,6 +1580,12 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { l = m.MatchResources.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1161,6 +1630,38 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) { l = len(*m.FailurePolicy) n += 1 + l + sovGenerated(uint64(l)) } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1178,6 +1679,8 @@ func (m *Validation) Size() (n int) { l = len(*m.Reason) n += 1 + l + sovGenerated(uint64(l)) } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1187,6 +1690,39 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, + `}`, + }, "") + return s +} +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} func (this *MatchResources) String() string { if this == nil { return "nil" @@ -1244,6 +1780,21 @@ func (this *ParamRef) String() string { }, "") return s } +func (this *TypeChecking) 
String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} func (this *ValidatingAdmissionPolicy) String() string { if this == nil { return "nil" @@ -1251,6 +1802,7 @@ func (this *ValidatingAdmissionPolicy) String() string { s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1290,6 +1842,7 @@ func (this *ValidatingAdmissionPolicyBindingSpec) String() string { `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, `}`, }, "") return s @@ -1314,39 +1867,411 @@ func (this *ValidatingAdmissionPolicySpec) String() string { if this == nil { return "nil" } - repeatedStringForValidations := "[]Validation{" - for _, f := range this.Validations { - repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range 
this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - repeatedStringForValidations += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, - `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, - `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, - `Validations:` + repeatedStringForValidations + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `}`, - }, "") - return s + return nil } -func (this *Validation) String() string { - if this == nil { - return "nil" +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } 
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&Validation{`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + valueToStringGenerated(this.Reason) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *MatchResources) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -1914,6 +2839,90 @@ func (m *ParamRef) Unmarshal(dAtA []byte) error { } return nil } +func (m *TypeChecking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeChecking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeChecking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpressionWarnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExpressionWarnings = append(m.ExpressionWarnings, ExpressionWarning{}) + if err := m.ExpressionWarnings[len(m.ExpressionWarnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1945,7 +2954,40 @@ func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1972,13 +3014,13 @@ func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2005,7 +3047,7 @@ func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2396,6 +3438,38 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidationActions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidationActions = append(m.ValidationActions, ValidationAction(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2702,6 
+3776,213 @@ func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { s := FailurePolicyType(dAtA[iNdEx:postIndex]) m.FailurePolicy = &s iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuditAnnotations = append(m.AuditAnnotations, AuditAnnotation{}) + if err := m.AuditAnnotations[len(m.AuditAnnotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeChecking", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TypeChecking == nil { + m.TypeChecking = &TypeChecking{} + } + if err := m.TypeChecking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2849,6 +4130,38 @@ func (m *Validation) Unmarshal(dAtA []byte) error { s := k8s_io_apimachinery_pkg_apis_meta_v1.StatusReason(dAtA[iNdEx:postIndex]) m.Reason = &s iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index fe8236cd..c718c546 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -29,6 +29,84 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1alpha1"; +// AuditAnnotation describes how to produce an audit annotation for an API request. +message AuditAnnotation { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. 
+ // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + optional string key = 1; + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + optional string valueExpression = 2; +} + +// ExpressionWarning is a warning information that targets a specific expression. +message ExpressionWarning { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + optional string fieldRef = 2; + + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + optional string warning = 3; +} + +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + // MatchResources decides whether to run the admission control policy on an object based // on whether it meets the match criteria. 
// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) @@ -161,6 +239,15 @@ message ParamRef { optional string namespace = 2; } +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +message TypeChecking { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + repeated ExpressionWarning expressionWarnings = 1; +} + // ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. @@ -169,6 +256,13 @@ message ValidatingAdmissionPolicy { // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; + + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + optional ValidatingAdmissionPolicyStatus status = 3; } // ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. @@ -213,6 +307,48 @@ message ValidatingAdmissionPolicyBindingSpec { // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. // +optional optional MatchResources matchResources = 3; + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. 
The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + repeated string validationActions = 4; } // ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. @@ -243,30 +379,91 @@ message ValidatingAdmissionPolicySpec { optional MatchResources matchConstraints = 2; // Validations contain CEL expressions which is used to apply the validation. - // A minimum of one validation is required for a policy definition. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. // +listType=atomic - // Required. + // +optional repeated Validation validations = 3; - // FailurePolicy defines how to handle failures for the admission policy. - // Failures can occur from invalid or mis-configured policy definitions or bindings. + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // // A policy is invalid if spec.paramKind refers to a non-existent Kind. // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // // Allowed values are Ignore or Fail. Defaults to Fail. // +optional optional string failurePolicy = 4; + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + repeated AuditAnnotation auditAnnotations = 5; + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. 
+ // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; +} + +// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy. +message ValidatingAdmissionPolicyStatus { + // The generation observed by the controller. + // +optional + optional int64 observedGeneration = 1; + + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + optional TypeChecking typeChecking = 2; + + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // Validation specifies the CEL expression which is used to apply the validation. message Validation { // Expression represents the expression which will be evaluated by CEL. // ref: https://github.com/google/cel-spec - // CEL expressions have access to the contents of the Admission request/response, organized into CEL variables as well as some other useful variables: + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: // - // 'object' - The object from the incoming request. The value is null for DELETE requests. - // 'oldObject' - The existing object. The value is null for CREATE requests. - // 'request' - Attributes of the admission request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - // 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. // // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the // object. No other metadata properties are accessible. @@ -313,5 +510,18 @@ message Validation { // If not set, StatusReasonInvalid is used in the response to the client. // +optional optional string reason = 3; + + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. 
+ // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + optional string messageExpression = 4; } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index b64bc628..2bbb55a4 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -74,6 +74,49 @@ type ValidatingAdmissionPolicy struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the ValidatingAdmissionPolicy. Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy. +type ValidatingAdmissionPolicyStatus struct { + // The generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"` + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` +} + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +type TypeChecking struct { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"` +} + +// ExpressionWarning is a warning information that targets a specific expression. +type ExpressionWarning struct { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"` + // The content of type checking information in a human-readable form. 
+ // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -107,20 +150,61 @@ type ValidatingAdmissionPolicySpec struct { MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` // Validations contain CEL expressions which is used to apply the validation. - // A minimum of one validation is required for a policy definition. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. // +listType=atomic - // Required. - Validations []Validation `json:"validations" protobuf:"bytes,3,rep,name=validations"` + // +optional + Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"` - // FailurePolicy defines how to handle failures for the admission policy. - // Failures can occur from invalid or mis-configured policy definitions or bindings. + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // // A policy is invalid if spec.paramKind refers to a non-existent Kind. // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // // Allowed values are Ignore or Fail. Defaults to Fail. // +optional FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"` + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. 
If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` } +type MatchCondition v1.MatchCondition + // ParamKind is a tuple of Group Kind and Version. // +structType=atomic type ParamKind struct { @@ -138,12 +222,16 @@ type ParamKind struct { type Validation struct { // Expression represents the expression which will be evaluated by CEL. // ref: https://github.com/google/cel-spec - // CEL expressions have access to the contents of the Admission request/response, organized into CEL variables as well as some other useful variables: + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: // - //'object' - The object from the incoming request. The value is null for DELETE requests. - //'oldObject' - The existing object. The value is null for CREATE requests. - //'request' - Attributes of the admission request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - //'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. // // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the // object. No other metadata properties are accessible. @@ -188,6 +276,55 @@ type Validation struct { // If not set, StatusReasonInvalid is used in the response to the client. // +optional Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. 
+ // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"` +} + +// AuditAnnotation describes how to produce an audit annotation for an API request. +type AuditAnnotation struct { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"` } // +genclient @@ -240,6 +377,48 @@ type ValidatingAdmissionPolicyBindingSpec struct { // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. // +optional MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. 
+ // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"` } // ParamRef references a parameter resource @@ -344,6 +523,24 @@ type MatchResources struct { MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"` } +// ValidationAction specifies a policy enforcement action. +// +enum +type ValidationAction string + +const ( + // Deny specifies that a validation failure results in a denied request. + Deny ValidationAction = "Deny" + // Warn specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + Warn ValidationAction = "Warn" + // Audit specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failure. + Audit ValidationAction = "Audit" +) + // NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. // +structType=atomic type NamedRuleWithOperations struct { diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go index a670bb20..b3cac182 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -24,9 +24,29 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
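For orientation, a minimal Go sketch of how the v1alpha1 fields added above (Validations with messageExpression, AuditAnnotations, MatchConditions, FailurePolicy, and the binding's ValidationActions) compose. This assumes the vendored packages k8s.io/api/admissionregistration/v1alpha1 and k8s.io/apimachinery/pkg/apis/meta/v1; the resource names and CEL expressions ("replica-limit.example.com", object.spec.replicas, and so on) are illustrative only, not taken from this patch.

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// FailurePolicy is a pointer to the string-typed FailurePolicyType;
	// the field documentation above allows "Ignore" or "Fail".
	failurePolicy := admissionregistrationv1alpha1.FailurePolicyType("Fail")

	policy := admissionregistrationv1alpha1.ValidatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "replica-limit.example.com"},
		Spec: admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec{
			FailurePolicy: &failurePolicy,
			// Spec.MatchConstraints (which resources the policy validates) is
			// also required by the API server; omitted here for brevity.
			// Validations is now +optional, but at least one of Validations or
			// AuditAnnotations must be present.
			Validations: []admissionregistrationv1alpha1.Validation{{
				Expression:        "object.spec.replicas <= 5",
				Message:           "replicas must be no more than 5",
				MessageExpression: `"replicas is " + string(object.spec.replicas) + ", limit is 5"`,
			}},
			// Audit annotation key is combined with the policy name in the audit event.
			AuditAnnotations: []admissionregistrationv1alpha1.AuditAnnotation{{
				Key:             "replica-count",
				ValueExpression: "string(object.spec.replicas)",
			}},
			// Match conditions filter requests before validations run.
			MatchConditions: []admissionregistrationv1alpha1.MatchCondition{{
				Name:       "exclude-kubelet-requests",
				Expression: `!("system:nodes" in request.userInfo.groups)`,
			}},
		},
	}

	binding := admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "replica-limit-binding.example.com"},
		Spec: admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec{
			PolicyName: policy.Name,
			// validationActions is a set of enforcement actions; per the docs,
			// "Deny" and "Warn" may not be combined.
			ValidationActions: []admissionregistrationv1alpha1.ValidationAction{
				admissionregistrationv1alpha1.Deny,
				admissionregistrationv1alpha1.Audit,
			},
		},
	}

	fmt.Println(policy.Name, binding.Name)
}

Per the field documentation in this hunk, validations and auditAnnotations may not both be empty, and the binding's validationActions is required and duplicate-free.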
+var map_AuditAnnotation = map[string]string{ + "": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", +} + +func (AuditAnnotation) SwaggerDoc() map[string]string { + return map_AuditAnnotation +} + +var map_ExpressionWarning = map[string]string{ + "": "ExpressionWarning is a warning information that targets a specific expression.", + "fieldRef": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "warning": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", +} + +func (ExpressionWarning) SwaggerDoc() map[string]string { + return map_ExpressionWarning +} + var map_MatchResources = map[string]string{ "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", @@ -69,10 +89,20 @@ func (ParamRef) SwaggerDoc() map[string]string { return map_ParamRef } +var map_TypeChecking = map[string]string{ + "": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "expressionWarnings": "The type checking warnings for each expression.", +} + +func (TypeChecking) SwaggerDoc() map[string]string { + return map_TypeChecking +} + var map_ValidatingAdmissionPolicy = map[string]string{ "": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.", } func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string { @@ -100,10 +130,11 @@ func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { } var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{ - "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", - "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", - "paramRef": "ParamRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.", - "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. 
Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "ParamRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.", + "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", } func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { @@ -124,19 +155,33 @@ var map_ValidatingAdmissionPolicySpec = map[string]string{ "": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", "paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", "matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", - "validations": "Validations contain CEL expressions which is used to apply the validation. A minimum of one validation is required for a policy definition. Required.", - "failurePolicy": "FailurePolicy defines how to handle failures for the admission policy. Failures can occur from invalid or mis-configured policy definitions or bindings. A policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource. Allowed values are Ignore or Fail. Defaults to Fail.", + "validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. 
A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", } func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string { return map_ValidatingAdmissionPolicySpec } +var map_ValidatingAdmissionPolicyStatus = map[string]string{ + "": "ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy.", + "observedGeneration": "The generation observed by the controller.", + "typeChecking": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.", + "conditions": "The conditions represent the latest available observations of a policy's current state.", +} + +func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyStatus +} + var map_Validation = map[string]string{ - "": "Validation specifies the CEL expression which is used to apply the validation.", - "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the Admission request/response, organized into CEL variables as well as some other useful variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. 
The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", - "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", - "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "": "Validation specifies the CEL expression which is used to apply the validation.", + "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. 
Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. 
Example: \"object.x must be less than max (\"+string(params.max)+\")\"", } func (Validation) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index 4f29ac7a..8e4abfd0 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,54 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation. +func (in *AuditAnnotation) DeepCopy() *AuditAnnotation { + if in == nil { + return nil + } + out := new(AuditAnnotation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning. +func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { + if in == nil { + return nil + } + out := new(ExpressionWarning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MatchResources) DeepCopyInto(out *MatchResources) { *out = *in @@ -125,12 +173,34 @@ func (in *ParamRef) DeepCopy() *ParamRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeChecking) DeepCopyInto(out *TypeChecking) { + *out = *in + if in.ExpressionWarnings != nil { + in, out := &in.ExpressionWarnings, &out.ExpressionWarnings + *out = make([]ExpressionWarning, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking. +func (in *TypeChecking) DeepCopy() *TypeChecking { + if in == nil { + return nil + } + out := new(TypeChecking) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } @@ -225,6 +295,11 @@ func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmi *out = new(MatchResources) (*in).DeepCopyInto(*out) } + if in.ValidationActions != nil { + in, out := &in.ValidationActions, &out.ValidationActions + *out = make([]ValidationAction, len(*in)) + copy(*out, *in) + } return } @@ -296,6 +371,16 @@ func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPo *out = new(FailurePolicyType) **out = **in } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make([]AuditAnnotation, len(*in)) + copy(*out, *in) + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -309,6 +394,34 @@ func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySp return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) { + *out = *in + if in.TypeChecking != nil { + in, out := &in.TypeChecking, &out.TypeChecking + *out = new(TypeChecking) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus. +func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Validation) DeepCopyInto(out *Validation) { *out = *in diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 56a9f10e..8fb354c3 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -45,10 +45,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{0} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{0} + return fileDescriptor_abeea74cbc46f55a, []int{1} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +104,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{1} + return fileDescriptor_abeea74cbc46f55a, []int{2} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -104,7 +132,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{2} + return fileDescriptor_abeea74cbc46f55a, []int{3} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +160,7 @@ var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{3} + return fileDescriptor_abeea74cbc46f55a, []int{4} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,7 +188,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{4} + return fileDescriptor_abeea74cbc46f55a, []int{5} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +216,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{5} + return fileDescriptor_abeea74cbc46f55a, 
[]int{6} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +244,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{6} + return fileDescriptor_abeea74cbc46f55a, []int{7} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +272,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{7} + return fileDescriptor_abeea74cbc46f55a, []int{8} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,6 +298,7 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition") proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") @@ -285,68 +314,106 @@ func init() { } var fileDescriptor_abeea74cbc46f55a = []byte{ - // 974 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x49, 0x6f, 0xdb, 0x46, - 0x14, 0x36, 0x2d, 0x29, 0x92, 0x46, 0xb2, 0x13, 0x4d, 0x97, 0xb0, 0x6e, 0x40, 0x0a, 0x3a, 0x14, - 0xba, 0x94, 0x4c, 0x9c, 0xa2, 0x4b, 0x8a, 0x1e, 0x42, 0xb7, 0x41, 0x0b, 0xd8, 0x4e, 0x3a, 0xce, - 0x02, 0xb4, 0x29, 0x90, 0x11, 0xf5, 0x24, 0x4d, 0x45, 0x72, 0x04, 0xce, 0x50, 0xa9, 0x6f, 0xfd, - 0x09, 0xfd, 0x0b, 0xfd, 0x21, 0xbd, 0xf5, 0xe0, 0x63, 0x8e, 0xb9, 0x94, 0xa8, 0xd9, 0x5e, 0x7b, - 0xe8, 0xd5, 0xa7, 0x82, 0x8b, 0x76, 0x39, 0x21, 0x5c, 0x20, 0x27, 0xdf, 0x34, 0xdf, 0xe3, 0xf7, - 0xbd, 0x79, 0x6f, 0xde, 0x02, 0xa1, 0x6f, 0x87, 0x9f, 0x0a, 0x83, 0x71, 0x73, 0x18, 0x74, 0xc0, - 0xf7, 0x40, 0x82, 0x30, 0xc7, 0xe0, 0x75, 0xb9, 0x6f, 0x66, 0x06, 0x3a, 0x62, 0x26, 0xed, 0xba, - 0x4c, 0x08, 0xc6, 0x3d, 0x1f, 0xfa, 0x4c, 0x48, 0x9f, 0x4a, 0xc6, 0x3d, 0x73, 0x7c, 0xab, 0x03, - 0x92, 0xde, 0x32, 0xfb, 0xe0, 0x81, 0x4f, 0x25, 0x74, 0x8d, 0x91, 0xcf, 0x25, 0xc7, 0xed, 0x94, - 0x69, 0xd0, 0x11, 0x33, 0xd6, 0x32, 0x8d, 0x8c, 0xb9, 0xf3, 0x61, 0x9f, 0xc9, 0x41, 0xd0, 0x31, - 0x6c, 0xee, 0x9a, 0x7d, 0xde, 0xe7, 0x66, 0x22, 0xd0, 0x09, 0x7a, 0xc9, 0x29, 0x39, 0x24, 0xbf, - 0x52, 0xe1, 0x9d, 0xdb, 0x39, 0xae, 0xb4, 0x7c, 0x9b, 0x9d, 0x8f, 0x66, 0x24, 0x97, 0xda, 0x03, - 0xe6, 0x81, 0x7f, 0x6c, 0x8e, 0x86, 0xfd, 0x18, 0x10, 0xa6, 0x0b, 0x92, 0xae, 0x63, 0x99, 0xe7, - 0xb1, 0xfc, 0xc0, 0x93, 0xcc, 0x85, 0x15, 0xc2, 0xc7, 0xaf, 0x23, 0x08, 0x7b, 0x00, 0x2e, 0x5d, - 0xe6, 0xb5, 0x7e, 0x2f, 0xa3, 0xab, 0x07, 0x81, 0xa4, 0x92, 0x79, 0xfd, 0x27, 0xd0, 0x19, 0x70, - 0x3e, 0xc4, 0x4d, 0x54, 0xf4, 0xa8, 0x0b, 0xaa, 0xd2, 0x54, 0xda, 0x55, 0xab, 0x7e, 0x12, 0xea, - 0x1b, 0x51, 0xa8, 
0x17, 0x0f, 0xa9, 0x0b, 0x24, 0xb1, 0xe0, 0xe7, 0xa8, 0x6e, 0x3b, 0x0c, 0x3c, - 0xb9, 0xc7, 0xbd, 0x1e, 0xeb, 0xab, 0x9b, 0x4d, 0xa5, 0x5d, 0xdb, 0xfd, 0xc2, 0xc8, 0x9b, 0x79, - 0x23, 0x73, 0xb5, 0x37, 0x27, 0x62, 0xbd, 0x9d, 0x39, 0xaa, 0xcf, 0xa3, 0x64, 0xc1, 0x11, 0x7e, - 0x8a, 0x4a, 0x7e, 0xe0, 0x80, 0x50, 0x0b, 0xcd, 0x42, 0xbb, 0xb6, 0xfb, 0x49, 0x1e, 0x8f, 0x06, - 0x09, 0x1c, 0x78, 0xc2, 0xe4, 0xe0, 0xfe, 0x08, 0x52, 0x50, 0x58, 0x5b, 0x99, 0xaf, 0x52, 0x6c, - 0x13, 0x24, 0x15, 0xc5, 0xfb, 0x68, 0xab, 0x47, 0x99, 0x13, 0xf8, 0xf0, 0x80, 0x3b, 0xcc, 0x3e, - 0x56, 0x8b, 0x49, 0x06, 0x3e, 0x88, 0x42, 0x7d, 0xeb, 0xde, 0xbc, 0xe1, 0x2c, 0xd4, 0x1b, 0x0b, - 0xc0, 0xc3, 0xe3, 0x11, 0x90, 0x45, 0x32, 0xfe, 0x12, 0xd5, 0x5c, 0x2a, 0xed, 0x41, 0xa6, 0x55, - 0x4d, 0xb4, 0x5a, 0x51, 0xa8, 0xd7, 0x0e, 0x66, 0xf0, 0x59, 0xa8, 0x5f, 0x9d, 0x3b, 0x26, 0x3a, - 0xf3, 0x34, 0xfc, 0x13, 0x6a, 0xc4, 0x29, 0x17, 0x23, 0x6a, 0xc3, 0x11, 0x38, 0x60, 0x4b, 0xee, - 0xab, 0xa5, 0x24, 0xdf, 0xb7, 0xe7, 0xa2, 0x9f, 0x3e, 0xba, 0x31, 0x1a, 0xf6, 0x63, 0x40, 0x18, - 0x71, 0x6d, 0xc5, 0xe1, 0xef, 0xd3, 0x0e, 0x38, 0x13, 0xaa, 0xf5, 0x4e, 0x14, 0xea, 0x8d, 0xc3, - 0x65, 0x45, 0xb2, 0xea, 0x04, 0x73, 0xb4, 0xcd, 0x3b, 0x3f, 0x82, 0x2d, 0xa7, 0x6e, 0x6b, 0x17, - 0x77, 0x8b, 0xa3, 0x50, 0xdf, 0xbe, 0xbf, 0x20, 0x47, 0x96, 0xe4, 0xe3, 0x84, 0x09, 0xd6, 0x85, - 0xaf, 0x7a, 0x3d, 0xb0, 0xa5, 0x50, 0xaf, 0xcc, 0x12, 0x76, 0x34, 0x83, 0xe3, 0x84, 0xcd, 0x8e, - 0x7b, 0x0e, 0x15, 0x82, 0xcc, 0xd3, 0xf0, 0x1d, 0xb4, 0x1d, 0x17, 0x3c, 0x0f, 0xe4, 0x11, 0xd8, - 0xdc, 0xeb, 0x0a, 0xb5, 0xdc, 0x54, 0xda, 0xa5, 0xf4, 0x06, 0x0f, 0x17, 0x2c, 0x64, 0xe9, 0x4b, - 0xfc, 0x08, 0x5d, 0x9f, 0x56, 0x11, 0x81, 0x31, 0x83, 0xe7, 0x8f, 0xc1, 0x8f, 0x0f, 0x42, 0xad, - 0x34, 0x0b, 0xed, 0xaa, 0xf5, 0x7e, 0x14, 0xea, 0xd7, 0xef, 0xae, 0xff, 0x84, 0x9c, 0xc7, 0xc5, - 0xcf, 0x10, 0xf6, 0x81, 0x79, 0x63, 0x6e, 0x27, 0xe5, 0x97, 0x15, 0x04, 0x4a, 0xe2, 0xbb, 0x19, - 0x85, 0x3a, 0x26, 0x2b, 0xd6, 0xb3, 0x50, 0x7f, 0x77, 0x15, 0x4d, 0xca, 0x63, 0x8d, 0x56, 0xeb, - 0x0f, 0x05, 0xdd, 0x58, 0x6a, 0xe3, 0xb4, 0x63, 0x82, 0xb4, 0xe2, 0xf1, 0x33, 0x54, 0x89, 0x1f, - 0xa6, 0x4b, 0x25, 0x4d, 0xfa, 0xba, 0xb6, 0x7b, 0x33, 0xdf, 0x33, 0xa6, 0x6f, 0x76, 0x00, 0x92, - 0x5a, 0x38, 0x6b, 0x1a, 0x34, 0xc3, 0xc8, 0x54, 0x15, 0x7f, 0x8f, 0x2a, 0x99, 0x67, 0xa1, 0x6e, - 0x26, 0xdd, 0xf9, 0x59, 0xfe, 0x79, 0xb0, 0x74, 0x77, 0xab, 0x18, 0xbb, 0x22, 0x53, 0xc1, 0xd6, - 0x3f, 0x0a, 0x6a, 0xbe, 0x2a, 0xbe, 0x7d, 0x26, 0x24, 0x7e, 0xba, 0x12, 0xa3, 0x91, 0xb3, 0x54, - 0x99, 0x48, 0x23, 0xbc, 0x96, 0x45, 0x58, 0x99, 0x20, 0x73, 0xf1, 0x0d, 0x51, 0x89, 0x49, 0x70, - 0x27, 0xc1, 0xdd, 0xbb, 0x70, 0x70, 0x0b, 0x17, 0x9f, 0x4d, 0xa2, 0x6f, 0x62, 0x71, 0x92, 0xfa, - 0x68, 0xfd, 0xaa, 0xa0, 0x6b, 0x47, 0xe0, 0x8f, 0x99, 0x0d, 0x04, 0x7a, 0xe0, 0x83, 0x67, 0x03, - 0x36, 0x51, 0x75, 0xda, 0xa5, 0xd9, 0x70, 0x6e, 0x64, 0xec, 0xea, 0xb4, 0xa3, 0xc9, 0xec, 0x9b, - 0xe9, 0x20, 0xdf, 0x3c, 0x77, 0x90, 0xdf, 0x40, 0xc5, 0x11, 0x95, 0x03, 0xb5, 0x90, 0x7c, 0x51, - 0x89, 0xad, 0x0f, 0xa8, 0x1c, 0x90, 0x04, 0x4d, 0xac, 0xdc, 0x97, 0xc9, 0x18, 0x2c, 0x65, 0x56, - 0xee, 0x4b, 0x92, 0xa0, 0xad, 0xbf, 0xaf, 0xa0, 0xc6, 0x63, 0xea, 0xb0, 0xee, 0xe5, 0xf2, 0xb8, - 0x5c, 0x1e, 0xaf, 0x5f, 0x1e, 0xe8, 0x72, 0x79, 0x5c, 0x64, 0x79, 0xb4, 0x4e, 0x15, 0xa4, 0xad, - 0xb4, 0xd9, 0x9b, 0x1e, 0xee, 0x3f, 0xac, 0x0c, 0xf7, 0xcf, 0xf3, 0xf7, 0xeb, 0xca, 0xed, 0x57, - 0xc6, 0xfb, 0xbf, 0x0a, 0x6a, 0xbd, 0x3a, 0xc6, 0x37, 0x30, 0xe0, 0xdd, 0xc5, 0x01, 0xff, 0xf5, - 0xff, 0x08, 0x30, 0xcf, 0x88, 0xff, 0x4d, 
0x41, 0x6f, 0xad, 0x99, 0x64, 0xf8, 0x3d, 0x54, 0x08, - 0x7c, 0x27, 0x9b, 0xc8, 0xe5, 0x28, 0xd4, 0x0b, 0x8f, 0xc8, 0x3e, 0x89, 0x31, 0x4c, 0x51, 0x59, - 0xa4, 0x4b, 0x21, 0x0b, 0xff, 0x4e, 0xfe, 0x3b, 0x2e, 0x6f, 0x13, 0xab, 0x16, 0x85, 0x7a, 0x79, - 0x82, 0x4e, 0x74, 0x71, 0x1b, 0x55, 0x6c, 0x6a, 0x05, 0x5e, 0xd7, 0x49, 0xd7, 0x46, 0xdd, 0xaa, - 0xc7, 0xe9, 0xda, 0xbb, 0x9b, 0x62, 0x64, 0x6a, 0xb5, 0x0e, 0x4f, 0x4e, 0xb5, 0x8d, 0x17, 0xa7, - 0xda, 0xc6, 0xcb, 0x53, 0x6d, 0xe3, 0xe7, 0x48, 0x53, 0x4e, 0x22, 0x4d, 0x79, 0x11, 0x69, 0xca, - 0xcb, 0x48, 0x53, 0xfe, 0x8c, 0x34, 0xe5, 0x97, 0xbf, 0xb4, 0x8d, 0xef, 0xda, 0x79, 0xff, 0xc6, - 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xc9, 0x34, 0x4c, 0x0a, 0x0e, 0x00, 0x00, + // 1041 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x73, 0xdb, 0xc4, + 0x1b, 0x8e, 0xe2, 0xf8, 0x17, 0x67, 0xed, 0x24, 0xcd, 0xfe, 0x80, 0x88, 0xd0, 0xb1, 0x3c, 0x3e, + 0x30, 0xbe, 0x20, 0xb5, 0x29, 0x03, 0xa5, 0x0c, 0x87, 0x2a, 0xb4, 0x03, 0x33, 0x49, 0x5a, 0x36, + 0xfd, 0x33, 0x03, 0x65, 0xa6, 0x6b, 0xf9, 0xb5, 0xbd, 0x58, 0xd2, 0x7a, 0xb4, 0xab, 0xb4, 0x19, + 0x2e, 0x7c, 0x04, 0xbe, 0x02, 0x1f, 0x84, 0x03, 0xb7, 0x1c, 0x7b, 0xec, 0x05, 0x0d, 0x11, 0x67, + 0x0e, 0x5c, 0x73, 0x62, 0xb4, 0x52, 0x6c, 0xcb, 0x76, 0x5a, 0x11, 0x66, 0x72, 0xca, 0xcd, 0xfb, + 0xbc, 0xfb, 0xbe, 0xcf, 0x3e, 0xab, 0x77, 0xdf, 0x67, 0x8c, 0xbe, 0x19, 0xdc, 0x16, 0x26, 0xe3, + 0xd6, 0x20, 0x6c, 0x43, 0xe0, 0x83, 0x04, 0x61, 0x1d, 0x82, 0xdf, 0xe1, 0x81, 0x95, 0x05, 0xe8, + 0x90, 0x59, 0xb4, 0xe3, 0x31, 0x21, 0x18, 0xf7, 0x03, 0xe8, 0x31, 0x21, 0x03, 0x2a, 0x19, 0xf7, + 0xad, 0xc3, 0x9b, 0x6d, 0x90, 0xf4, 0xa6, 0xd5, 0x03, 0x1f, 0x02, 0x2a, 0xa1, 0x63, 0x0e, 0x03, + 0x2e, 0x39, 0x6e, 0xa5, 0x99, 0x26, 0x1d, 0x32, 0x73, 0x6e, 0xa6, 0x99, 0x65, 0x6e, 0x7d, 0xd4, + 0x63, 0xb2, 0x1f, 0xb6, 0x4d, 0x87, 0x7b, 0x56, 0x8f, 0xf7, 0xb8, 0xa5, 0x0a, 0xb4, 0xc3, 0xae, + 0x5a, 0xa9, 0x85, 0xfa, 0x95, 0x16, 0xde, 0xba, 0x55, 0xe0, 0x48, 0xd3, 0xa7, 0xd9, 0xfa, 0x78, + 0x9c, 0xe4, 0x51, 0xa7, 0xcf, 0x7c, 0x08, 0x8e, 0xac, 0xe1, 0xa0, 0x97, 0x00, 0xc2, 0xf2, 0x40, + 0xd2, 0x79, 0x59, 0xd6, 0x79, 0x59, 0x41, 0xe8, 0x4b, 0xe6, 0xc1, 0x4c, 0xc2, 0x27, 0x6f, 0x4b, + 0x10, 0x4e, 0x1f, 0x3c, 0x3a, 0x9d, 0xd7, 0xec, 0xa2, 0xb5, 0x3d, 0x2a, 0x9d, 0xfe, 0x0e, 0xf7, + 0x3b, 0x2c, 0xd1, 0x80, 0x1b, 0x68, 0xc9, 0xa7, 0x1e, 0xe8, 0x5a, 0x43, 0x6b, 0xad, 0xd8, 0xb5, + 0xe3, 0xc8, 0x58, 0x88, 0x23, 0x63, 0x69, 0x9f, 0x7a, 0x40, 0x54, 0x04, 0x6f, 0x23, 0x04, 0x2f, + 0x87, 0x01, 0x28, 0xfd, 0xfa, 0xa2, 0xda, 0x87, 0xb3, 0x7d, 0xe8, 0xde, 0x28, 0x42, 0x26, 0x76, + 0x35, 0x7f, 0xab, 0xa0, 0xf5, 0xbd, 0x50, 0x52, 0xc9, 0xfc, 0xde, 0x53, 0x68, 0xf7, 0x39, 0x1f, + 0x14, 0x60, 0x7a, 0x81, 0x6a, 0x8e, 0xcb, 0xc0, 0x97, 0x3b, 0xdc, 0xef, 0xb2, 0x9e, 0xe2, 0xaa, + 0x6e, 0x7f, 0x61, 0x16, 0xfd, 0xc2, 0x66, 0x46, 0xb5, 0x33, 0x51, 0xc4, 0x7e, 0x27, 0x23, 0xaa, + 0x4d, 0xa2, 0x24, 0x47, 0x84, 0x9f, 0xa1, 0x72, 0x10, 0xba, 0x20, 0xf4, 0x52, 0xa3, 0xd4, 0xaa, + 0x6e, 0x7f, 0x5a, 0x84, 0xd1, 0x24, 0xa1, 0x0b, 0x4f, 0x99, 0xec, 0x3f, 0x18, 0x42, 0x0a, 0x0a, + 0x7b, 0x35, 0xe3, 0x2a, 0x27, 0x31, 0x41, 0xd2, 0xa2, 0x78, 0x17, 0xad, 0x76, 0x29, 0x73, 0xc3, + 0x00, 0x1e, 0x72, 0x97, 0x39, 0x47, 0xfa, 0x92, 0xba, 0x81, 0x0f, 0xe3, 0xc8, 0x58, 0xbd, 0x3f, + 0x19, 0x38, 0x8d, 0x8c, 0x8d, 0x1c, 0xf0, 0xe8, 0x68, 0x08, 0x24, 0x9f, 0x8c, 0xbf, 0x44, 0x55, + 0x2f, 0xf9, 0x84, 0x59, 0xad, 0x15, 0x55, 0xab, 0x19, 0x47, 0x46, 0x75, 0x6f, 0x0c, 0x9f, 0x46, + 0xc6, 0xfa, 0xc4, 0x52, 0xd5, 
0x99, 0x4c, 0xc3, 0x2f, 0xd1, 0x46, 0x72, 0xe5, 0x62, 0x48, 0x1d, + 0x38, 0x00, 0x17, 0x1c, 0xc9, 0x03, 0xbd, 0xac, 0xee, 0xfb, 0xd6, 0x84, 0xfa, 0x51, 0x73, 0x99, + 0xc3, 0x41, 0x2f, 0x01, 0x84, 0x99, 0xf4, 0x70, 0x22, 0x7f, 0x97, 0xb6, 0xc1, 0x3d, 0x4b, 0xb5, + 0xdf, 0x8d, 0x23, 0x63, 0x63, 0x7f, 0xba, 0x22, 0x99, 0x25, 0xc1, 0x1c, 0xad, 0xf1, 0xf6, 0x0f, + 0xe0, 0xc8, 0x11, 0x6d, 0xf5, 0xe2, 0xb4, 0x38, 0x8e, 0x8c, 0xb5, 0x07, 0xb9, 0x72, 0x64, 0xaa, + 0x7c, 0x72, 0x61, 0x82, 0x75, 0xe0, 0x5e, 0xb7, 0x0b, 0x8e, 0x14, 0xfa, 0xff, 0xc6, 0x17, 0x76, + 0x30, 0x86, 0x93, 0x0b, 0x1b, 0x2f, 0x77, 0x5c, 0x2a, 0x04, 0x99, 0x4c, 0xc3, 0x77, 0xd0, 0x5a, + 0xf2, 0xb0, 0x78, 0x28, 0x0f, 0xc0, 0xe1, 0x7e, 0x47, 0xe8, 0xcb, 0x0d, 0xad, 0x55, 0x4e, 0x4f, + 0xf0, 0x28, 0x17, 0x21, 0x53, 0x3b, 0xf1, 0x63, 0xb4, 0x39, 0xea, 0x22, 0x02, 0x87, 0x0c, 0x5e, + 0x3c, 0x81, 0x20, 0x59, 0x08, 0xbd, 0xd2, 0x28, 0xb5, 0x56, 0xec, 0x0f, 0xe2, 0xc8, 0xd8, 0xbc, + 0x3b, 0x7f, 0x0b, 0x39, 0x2f, 0x17, 0x3f, 0x47, 0x38, 0x00, 0xe6, 0x1f, 0x72, 0x47, 0xb5, 0x5f, + 0xd6, 0x10, 0x48, 0xe9, 0xbb, 0x11, 0x47, 0x06, 0x26, 0x33, 0xd1, 0xd3, 0xc8, 0x78, 0x6f, 0x16, + 0x55, 0xed, 0x31, 0xa7, 0x16, 0xfe, 0x11, 0xad, 0x7b, 0xb9, 0x71, 0x21, 0xf4, 0x9a, 0x7a, 0x21, + 0xb7, 0x8b, 0xbf, 0xc9, 0xfc, 0xbc, 0xb1, 0x37, 0xb3, 0x27, 0xb2, 0x9e, 0xc7, 0x05, 0x99, 0x66, + 0x6a, 0xfe, 0xae, 0xa1, 0xeb, 0x53, 0x33, 0x24, 0x7d, 0xae, 0x61, 0xca, 0x80, 0x9f, 0xa3, 0x4a, + 0xd2, 0x15, 0x1d, 0x2a, 0xa9, 0x1a, 0x2a, 0xd5, 0xed, 0x1b, 0xc5, 0x7a, 0x28, 0x6d, 0x98, 0x3d, + 0x90, 0x74, 0x3c, 0xc8, 0xc6, 0x18, 0x19, 0x55, 0xc5, 0xdf, 0xa1, 0x4a, 0xc6, 0x2c, 0xf4, 0x45, + 0x25, 0xfc, 0xb3, 0x7f, 0x21, 0x3c, 0x7f, 0x76, 0x7b, 0x29, 0xa1, 0x22, 0xa3, 0x82, 0xcd, 0xbf, + 0x34, 0xd4, 0x78, 0x93, 0xbe, 0x5d, 0x26, 0x24, 0x7e, 0x36, 0xa3, 0xd1, 0x2c, 0xf8, 0x4e, 0x98, + 0x48, 0x15, 0x5e, 0xcb, 0x14, 0x56, 0xce, 0x90, 0x09, 0x7d, 0x03, 0x54, 0x66, 0x12, 0xbc, 0x33, + 0x71, 0xf7, 0x2f, 0x2c, 0x2e, 0x77, 0xf0, 0xf1, 0x18, 0xfc, 0x3a, 0x29, 0x4e, 0x52, 0x8e, 0xe6, + 0x2f, 0x1a, 0xba, 0x76, 0x00, 0xc1, 0x21, 0x73, 0x80, 0x40, 0x17, 0x02, 0xf0, 0x1d, 0xc0, 0x16, + 0x5a, 0x19, 0x8d, 0x88, 0xcc, 0x19, 0x36, 0xb2, 0xec, 0x95, 0xd1, 0x38, 0x21, 0xe3, 0x3d, 0x23, + 0x17, 0x59, 0x3c, 0xd7, 0x45, 0xae, 0xa3, 0xa5, 0x21, 0x95, 0x7d, 0xbd, 0xa4, 0x76, 0x54, 0x92, + 0xe8, 0x43, 0x2a, 0xfb, 0x44, 0xa1, 0x2a, 0xca, 0x03, 0xa9, 0x66, 0x70, 0x39, 0x8b, 0xf2, 0x40, + 0x12, 0x85, 0x36, 0x4f, 0x96, 0xd1, 0xc6, 0x13, 0xea, 0xb2, 0xce, 0x95, 0x73, 0x5d, 0x39, 0xd7, + 0xdb, 0x9d, 0x0b, 0x5d, 0x39, 0xd7, 0x85, 0x9c, 0x6b, 0x8e, 0xaf, 0x54, 0x2f, 0xcd, 0x57, 0x4e, + 0x34, 0x54, 0x9f, 0x79, 0xe3, 0x97, 0xed, 0x2c, 0xdf, 0xcf, 0x38, 0xcb, 0xe7, 0xc5, 0xa5, 0xcf, + 0x9c, 0x7e, 0xc6, 0x5b, 0xfe, 0xd6, 0x50, 0xf3, 0xcd, 0x1a, 0x2f, 0xc1, 0x5d, 0xbc, 0xbc, 0xbb, + 0x7c, 0xf5, 0x1f, 0x04, 0x16, 0xf1, 0x97, 0x5f, 0x35, 0xf4, 0xff, 0x39, 0x63, 0x14, 0xbf, 0x8f, + 0x4a, 0x61, 0xe0, 0x66, 0x76, 0xb0, 0x1c, 0x47, 0x46, 0xe9, 0x31, 0xd9, 0x25, 0x09, 0x86, 0x29, + 0x5a, 0x16, 0xa9, 0x23, 0x65, 0xf2, 0xef, 0x14, 0x3f, 0xe3, 0xb4, 0x95, 0xd9, 0xd5, 0x38, 0x32, + 0x96, 0xcf, 0xd0, 0xb3, 0xba, 0xb8, 0x85, 0x2a, 0x0e, 0xb5, 0x43, 0xbf, 0xe3, 0xa6, 0x9e, 0x55, + 0xb3, 0x6b, 0xc9, 0x75, 0xed, 0xdc, 0x4d, 0x31, 0x32, 0x8a, 0xda, 0xfb, 0xc7, 0x27, 0xf5, 0x85, + 0x57, 0x27, 0xf5, 0x85, 0xd7, 0x27, 0xf5, 0x85, 0x9f, 0xe2, 0xba, 0x76, 0x1c, 0xd7, 0xb5, 0x57, + 0x71, 0x5d, 0x7b, 0x1d, 0xd7, 0xb5, 0x3f, 0xe2, 0xba, 0xf6, 0xf3, 0x9f, 0xf5, 0x85, 0x6f, 0x5b, + 0x45, 0xff, 0x28, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 
0x1f, 0xf5, 0x97, 0x1c, 0x6c, 0x0f, 0x00, + 0x00, +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { @@ -369,6 +436,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -626,6 +707,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -871,6 +966,19 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MutatingWebhook) Size() (n int) { if m == nil { return 0 @@ -920,6 +1028,12 @@ func (m *MutatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1022,6 +1136,12 @@ func (m *ValidatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1086,6 +1206,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} func (this *MutatingWebhook) String() string { if this == nil { return "nil" @@ -1095,6 +1226,11 @@ func (this *MutatingWebhook) String() string { repeatedStringForRules += fmt.Sprintf("%v", f) + 
"," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&MutatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1107,6 +1243,7 @@ func (this *MutatingWebhook) String() string { `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1165,6 +1302,11 @@ func (this *ValidatingWebhook) String() string { repeatedStringForRules += fmt.Sprintf("%v", f) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&ValidatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1176,6 +1318,7 @@ func (this *ValidatingWebhook) String() string { `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1232,6 +1375,120 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1616,6 +1873,40 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2389,6 +2680,40 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index c7016afb..cfd75928 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -29,6 +29,35 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables 
from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1beta1"; +// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. @@ -177,6 +206,28 @@ message MutatingWebhook { // Defaults to "Never". // +optional optional string reinvocationPolicy = 10; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 12; } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. @@ -356,6 +407,28 @@ message ValidatingWebhook { // Default to `['v1beta1']`. 
// +optional repeated string admissionReviewVersions = 8; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 11; } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 5fdf8e3f..82ee7df9 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -283,6 +283,28 @@ type ValidatingWebhook struct { // Default to `['v1beta1']`. // +optional AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,rep,name=matchConditions"` } // MutatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -433,6 +455,28 @@ type MutatingWebhook struct { // Defaults to "Never". // +optional ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. 
An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,rep,name=matchConditions"` } // ReinvocationPolicyType specifies what type of policy the admission hook uses. @@ -531,3 +575,32 @@ type ServiceReference struct { // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } + +// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. +type MatchCondition struct { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index c57c5b7f..2c0a9f01 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
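The MatchCondition type introduced above is a plain pair of strings, so the new matchConditions fields can be populated directly with the vendored structs. Below is a minimal sketch, assuming the package is imported as admissionv1beta1; the webhook name, condition name, and CEL expression are illustrative placeholders, not values taken from this change.

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	// MatchCondition is a simple value type: Name identifies the condition
	// (it is the merge key for the +listMapKey=name list) and Expression is
	// a CEL expression that must evaluate to bool.
	cond := admissionv1beta1.MatchCondition{
		Name:       "exclude-lease-requests",                // hypothetical condition name
		Expression: "request.resource.resource != 'leases'", // hypothetical CEL expression using the 'request' variable
	}

	// matchConditions is proto field 11 on ValidatingWebhook and field 12 on
	// MutatingWebhook, which is why the generated marshal code earlier in this
	// file writes the tag bytes 0x5a ((11<<3)|2) and 0x62 ((12<<3)|2) before
	// each element.
	webhook := admissionv1beta1.ValidatingWebhook{
		Name:                    "example.validator.k8s.io", // hypothetical webhook name
		AdmissionReviewVersions: []string{"v1beta1"},
		MatchConditions:         []admissionv1beta1.MatchCondition{cond},
	}

	// The generated String() method shown in this diff renders the
	// MatchConditions block alongside the other webhook fields.
	fmt.Println(webhook.String())
}

The tag-byte comment ties the sketch back to the generated MarshalToSizedBuffer code above: the same field numbers appear in the generated.proto hunks for ValidatingWebhook and MutatingWebhook.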
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MatchCondition = map[string]string{ + "": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.", + "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "expression": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", +} + +func (MatchCondition) SwaggerDoc() map[string]string { + return map_MatchCondition +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -40,6 +50,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. 
Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -90,6 +101,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. 
If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (ValidatingWebhook) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index ced4af19..9c5299bd 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -27,6 +27,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -78,6 +94,11 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = new(ReinvocationPolicyType) **out = **in } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -229,6 +250,11 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go index 6de93420..3b75fa65 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ServerStorageVersion = map[string]string{ diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 534b550f..a7a7e7c5 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -127,6 +127,7 @@ message DaemonSetSpec { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". 
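Because MatchCondition carries only two string fields, the DeepCopyInto generated above can duplicate a []MatchCondition with a single copy call instead of element-wise deep copies. A short sketch of what that guarantees for callers of the vendored package; the webhook name and expression are illustrative only.

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	webhook := &admissionv1beta1.MutatingWebhook{
		Name: "example.mutator.k8s.io", // hypothetical name
		MatchConditions: []admissionv1beta1.MatchCondition{
			{Name: "only-creates", Expression: "request.operation == 'CREATE'"}, // hypothetical condition
		},
	}

	// DeepCopy uses the generated DeepCopyInto shown in this diff: it allocates
	// a fresh []MatchCondition and copies the elements, so mutating the clone
	// does not touch the original slice.
	clone := webhook.DeepCopy()
	clone.MatchConditions[0].Expression = "true"

	fmt.Println(webhook.MatchConditions[0].Expression) // request.operation == 'CREATE'
	fmt.Println(clone.MatchConditions[0].Expression)   // true
}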
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template optional k8s.io.api.core.v1.PodTemplateSpec template = 2; @@ -277,6 +278,7 @@ message DeploymentSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. @@ -675,6 +677,7 @@ message StatefulSetSpec { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -735,7 +738,7 @@ message StatefulSetSpec { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 09766c29..15dc3150 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -199,6 +199,7 @@ type StatefulSetSpec struct { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -259,7 +260,7 @@ type StatefulSetSpec struct { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -379,6 +380,7 @@ type DeploymentSpec struct { Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -638,6 +640,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 509bb11c..6676da06 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ @@ -85,7 +85,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -162,7 +162,7 @@ var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -347,7 +347,7 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\".", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", @@ -355,7 +355,7 @@ var map_StatefulSetSpec = map[string]string{ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. 
The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 15fb1aa8..245ec30f 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -47,10 +47,10 @@ message ControllerRevision { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Data is the serialized representation of the state. + // data is the serialized representation of the state. optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; - // Revision indicates the revision of the state represented by Data. + // revision indicates the revision of the state represented by Data. optional int64 revision = 3; } @@ -128,17 +128,18 @@ message DeploymentRollback { // DeploymentSpec is the specification of the desired behavior of the Deployment. message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit + // replicas is the number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional optional int32 replicas = 1; - // Label selector for pods. Existing ReplicaSets whose pods are + // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. 
@@ -146,28 +147,28 @@ message DeploymentSpec { // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional optional int32 minReadySeconds = 5; - // The number of old ReplicaSets to retain to allow rollback. + // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // Defaults to 2. // +optional optional int32 revisionHistoryLimit = 6; - // Indicates that the deployment is paused. + // paused indicates that the deployment is paused. // +optional optional bool paused = 7; // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. + // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done. // +optional optional RollbackConfig rollbackTo = 8; - // The maximum time in seconds for a deployment to make progress before it + // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded // reason will be surfaced in the deployment status. Note that progress will @@ -178,15 +179,15 @@ message DeploymentSpec { // DeploymentStatus is the most recently observed status of the Deployment. message DeploymentStatus { - // The generation observed by the deployment controller. + // observedGeneration is the generation observed by the deployment controller. // +optional optional int64 observedGeneration = 1; - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). // +optional optional int32 replicas = 2; - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional optional int32 updatedReplicas = 3; @@ -198,18 +199,18 @@ message DeploymentStatus { // +optional optional int32 availableReplicas = 4; - // Total number of unavailable pods targeted by this deployment. This is the total number of + // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional optional int32 unavailableReplicas = 5; - // Represents the latest available observations of a deployment's current state. + // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge repeated DeploymentCondition conditions = 6; - // Count of hash collisions for the Deployment. The Deployment controller uses this + // collisionCount is the count of hash collisions for the Deployment. 
The Deployment controller uses this // field as a collision avoidance mechanism when it needs to create the name for the // newest ReplicaSet. // +optional @@ -276,7 +277,7 @@ message RollingUpdateStatefulSetStrategy { // This is helpful in being able to do a canary based deployment. The default value is 0. optional int32 partition = 1; - // The maximum number of pods that can be unavailable during the update. + // maxUnavailable is the maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). // Absolute number is calculated from percentage by rounding up. This can not be 0. // Defaults to 1. This field is alpha-level and is only honored by servers that enable the @@ -293,32 +294,32 @@ message Scale { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } // ScaleSpec describes the attributes of a scale subresource message ScaleSpec { - // desired number of instances for the scaled object. + // replicas is the number of observed instances of the scaled object. // +optional optional int32 replicas = 1; } // ScaleStatus represents the current status of a scale subresource. message ScaleStatus { - // actual number of observed instances of the scaled object. + // replias is the actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional map selector = 2; - // label selector for pods that should match the replicas count. This is a serializated + // targetSelector is the label selector for pods that should match the replicas count. This is a serializated // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this @@ -398,13 +399,13 @@ message StatefulSetOrdinals { // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. message StatefulSetPersistentVolumeClaimRetentionPolicy { - // WhenDeleted specifies what happens to PVCs created from StatefulSet + // whenDeleted specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is deleted. The default policy // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The // `Delete` policy causes those PVCs to be deleted. 
optional string whenDeleted = 1; - // WhenScaled specifies what happens to PVCs created from StatefulSet + // whenScaled specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is scaled down. The default // policy of `Retain` causes PVCs to not be affected by a scaledown. The // `Delete` policy causes the associated PVCs for any excess pods above @@ -475,7 +476,7 @@ message StatefulSetSpec { // StatefulSetSpec version. The default value is 10. optional int32 revisionHistoryLimit = 8; - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional @@ -491,7 +492,7 @@ message StatefulSetSpec { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional optional StatefulSetOrdinals ordinals = 11; } @@ -531,13 +532,13 @@ message StatefulSetStatus { // +optional optional int32 collisionCount = 9; - // Represents the latest available observations of a statefulset's current state. + // conditions represent the latest available observations of a statefulset's current state. // +optional // +patchMergeKey=type // +patchStrategy=merge repeated StatefulSetCondition conditions = 10; - // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. + // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. // +optional optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 91002309..59ed9c2a 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -31,21 +31,21 @@ const ( // ScaleSpec describes the attributes of a scale subresource type ScaleSpec struct { - // desired number of instances for the scaled object. + // replicas is the number of observed instances of the scaled object. // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { - // actual number of observed instances of the scaled object. + // replias is the actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - // label selector for pods that should match the replicas count. This is a serializated + // targetSelector is the label selector for pods that should match the replicas count. This is a serializated // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. 
The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this @@ -68,11 +68,11 @@ type Scale struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -159,7 +159,7 @@ type RollingUpdateStatefulSetStrategy struct { // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. // This is helpful in being able to do a canary based deployment. The default value is 0. Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` - // The maximum number of pods that can be unavailable during the update. + // maxUnavailable is the maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). // Absolute number is calculated from percentage by rounding up. This can not be 0. // Defaults to 1. This field is alpha-level and is only honored by servers that enable the @@ -191,12 +191,12 @@ const ( // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. type StatefulSetPersistentVolumeClaimRetentionPolicy struct { - // WhenDeleted specifies what happens to PVCs created from StatefulSet + // whenDeleted specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is deleted. The default policy // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The // `Delete` policy causes those PVCs to be deleted. WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` - // WhenScaled specifies what happens to PVCs created from StatefulSet + // whenScaled specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is scaled down. The default // policy of `Retain` causes PVCs to not be affected by a scaledown. The // `Delete` policy causes the associated PVCs for any excess pods above @@ -282,7 +282,7 @@ type StatefulSetSpec struct { // StatefulSetSpec version. The default value is 10. RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. 
// Defaults to 0 (pod will be considered available as soon as it is ready) // +optional @@ -298,7 +298,7 @@ type StatefulSetSpec struct { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -338,13 +338,13 @@ type StatefulSetStatus struct { // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - // Represents the latest available observations of a statefulset's current state. + // conditions represent the latest available observations of a statefulset's current state. // +optional // +patchMergeKey=type // +patchStrategy=merge Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` - // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. + // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. // +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } @@ -409,17 +409,18 @@ type Deployment struct { // DeploymentSpec is the specification of the desired behavior of the Deployment. type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit + // replicas is the number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - // Label selector for pods. Existing ReplicaSets whose pods are + // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -427,28 +428,28 @@ type DeploymentSpec struct { // +patchStrategy=retainKeys Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - // The number of old ReplicaSets to retain to allow rollback. + // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // Defaults to 2. 
// +optional RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` - // Indicates that the deployment is paused. + // paused indicates that the deployment is paused. // +optional Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. + // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done. // +optional RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` - // The maximum time in seconds for a deployment to make progress before it + // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded // reason will be surfaced in the deployment status. Note that progress will @@ -547,15 +548,15 @@ type RollingUpdateDeployment struct { // DeploymentStatus is the most recently observed status of the Deployment. type DeploymentStatus struct { - // The generation observed by the deployment controller. + // observedGeneration is the generation observed by the deployment controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` @@ -567,18 +568,18 @@ type DeploymentStatus struct { // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - // Total number of unavailable pods targeted by this deployment. This is the total number of + // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` - // Represents the latest available observations of a deployment's current state. + // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` - // Count of hash collisions for the Deployment. The Deployment controller uses this + // collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this // field as a collision avoidance mechanism when it needs to create the name for the // newest ReplicaSet. 
// +optional @@ -660,10 +661,10 @@ type ControllerRevision struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Data is the serialized representation of the state. + // data is the serialized representation of the state. Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` - // Revision indicates the revision of the state represented by Data. + // revision indicates the revision of the state represented by Data. Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` } diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 00d6d182..a62e9869 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "data": "Data is the serialized representation of the state.", - "revision": "Revision indicates the revision of the state represented by Data.", + "data": "data is the serialized representation of the state.", + "revision": "revision indicates the revision of the state represented by Data.", } func (ControllerRevision) SwaggerDoc() map[string]string { @@ -96,15 +96,15 @@ func (DeploymentRollback) SwaggerDoc() map[string]string { var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", - "template": "Template describes the pods that will be created.", + "replicas": "replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "selector is the label selector for pods. 
Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", - "paused": "Indicates that the deployment is paused.", - "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + "paused": "paused indicates that the deployment is paused.", + "rollbackTo": "DEPRECATED. rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", } func (DeploymentSpec) SwaggerDoc() map[string]string { @@ -113,14 +113,14 @@ func (DeploymentSpec) SwaggerDoc() map[string]string { var map_DeploymentStatus = map[string]string{ "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "observedGeneration": "observedGeneration is the generation observed by the deployment controller.", + "replicas": "replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.", "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", - "conditions": "Represents the latest available observations of a deployment's current state.", - "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", + "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Conditions represent the latest available observations of a deployment's current state.", + "collisionCount": "collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", } func (DeploymentStatus) SwaggerDoc() map[string]string { @@ -159,7 +159,7 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { var map_RollingUpdateStatefulSetStrategy = map[string]string{ "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. 
The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", + "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", } func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { @@ -169,8 +169,8 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "spec": "spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -179,7 +179,7 @@ func (Scale) SwaggerDoc() map[string]string { var map_ScaleSpec = map[string]string{ "": "ScaleSpec describes the attributes of a scale subresource", - "replicas": "desired number of instances for the scaled object.", + "replicas": "replicas is the number of observed instances of the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -188,9 +188,9 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "replicas": "replias is the actual number of observed instances of the scaled object.", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "targetSelector": "targetSelector is the label selector for pods that should match the replicas count. 
This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { @@ -239,8 +239,8 @@ func (StatefulSetOrdinals) SwaggerDoc() map[string]string { var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", - "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", - "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", + "whenDeleted": "whenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "whenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", } func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { @@ -257,9 +257,9 @@ var map_StatefulSetSpec = map[string]string{ "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -276,8 +276,8 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.", + "conditions": "conditions represent the latest available observations of a statefulset's current state.", + "availableReplicas": "availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index af8c4fe4..ddbe3544 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -131,6 +131,7 @@ message DaemonSetSpec { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template optional k8s.io.api.core.v1.PodTemplateSpec template = 2; @@ -282,6 +283,7 @@ message DeploymentSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. 
@@ -600,7 +602,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic map selector = 2; @@ -720,6 +722,7 @@ message StatefulSetSpec { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -777,7 +780,7 @@ message StatefulSetSpec { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index dbe4d23b..a97ac6fc 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -43,7 +43,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -250,6 +250,7 @@ type StatefulSetSpec struct { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -307,7 +308,7 @@ type StatefulSetSpec struct { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -429,6 +430,7 @@ type DeploymentSpec struct { Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. 
@@ -690,6 +692,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 1936a246..d7e92099 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ @@ -85,7 +85,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -162,7 +162,7 @@ var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", + "template": "Template describes the pods that will be created. 
The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -313,7 +313,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } @@ -375,7 +375,7 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\".", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. 
A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", @@ -383,7 +383,7 @@ var map_StatefulSetSpec = map[string]string{ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index 5d37ac1f..b1a730b8 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_BoundObjectReference = map[string]string{ diff --git a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto index 3198dce3..51d92524 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto @@ -30,7 +30,8 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "k8s.io/api/authentication/v1alpha1"; // SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. -// When using impersonation, users will receive the user info of the user being impersonated. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. message SelfSubjectReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata diff --git a/vendor/k8s.io/api/authentication/v1alpha1/types.go b/vendor/k8s.io/api/authentication/v1alpha1/types.go index da65028c..1ee3612f 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/types.go +++ b/vendor/k8s.io/api/authentication/v1alpha1/types.go @@ -25,10 +25,11 @@ import ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 +// +k8s:prerelease-lifecycle-gen:introduced=1.26 // SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. -// When using impersonation, users will receive the user info of the user being impersonated. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. type SelfSubjectReview struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. diff --git a/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go index bc17c5f3..1ffcc99e 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go @@ -24,11 +24,11 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_SelfSubjectReview = map[string]string{ - "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated.", + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "status": "Status is filled in by the server with the user attributes.", } diff --git a/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go index b86dfbef..62a70a78 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -24,17 +24,17 @@ package v1alpha1 // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { - return 1, 25 + return 1, 26 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *SelfSubjectReview) APILifecycleDeprecated() (major, minor int) { - return 1, 28 + return 1, 29 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *SelfSubjectReview) APILifecycleRemoved() (major, minor int) { - return 1, 31 + return 1, 32 } diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 1978dcf6..7f1d5ca6 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -72,10 +72,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } +func (*SelfSubjectReview) ProtoMessage() {} +func (*SelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{1} +} +func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReview.Merge(m, src) +} +func (m *SelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo + +func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } +func (*SelfSubjectReviewStatus) ProtoMessage() {} +func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{2} +} +func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src) +} +func (m *SelfSubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo + func (m *TokenReview) Reset() { *m = TokenReview{} } func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{1} + return fileDescriptor_77c9b20d3ad27844, []int{3} } func (m *TokenReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +159,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } func (*TokenReviewSpec) ProtoMessage() {} func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{2} + return fileDescriptor_77c9b20d3ad27844, []int{4} } func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +187,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } func (*TokenReviewStatus) ProtoMessage() {} func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{3} + return fileDescriptor_77c9b20d3ad27844, []int{5} } func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +215,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo func (m *UserInfo) Reset() { *m = UserInfo{} } func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{4} + return fileDescriptor_77c9b20d3ad27844, []int{6} } func (m *UserInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,6 +242,8 @@ var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") + proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReview") + proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReviewStatus") proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1beta1.TokenReview") proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") @@ -198,49 +256,53 @@ func init() { } var fileDescriptor_77c9b20d3ad27844 = []byte{ - // 666 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4e, 0x13, 0x5f, - 0x14, 0x9e, 0xe9, 0x1f, 0xd2, 0xde, 0xfe, 0xfa, 0x13, 0x6f, 0x62, 0xd2, 0x34, 0x71, 0x0a, 0x75, - 0x43, 0x82, 0xdc, 0x11, 0x42, 0x90, 0xe0, 0x8a, 0x51, 0x42, 0x30, 0x21, 0x26, 0x57, 0x70, 0xa1, - 0x2e, 0xbc, 0x9d, 0x1e, 0xa6, 0x63, 0x9d, 0x3f, 0xb9, 0x73, 0xa7, 0xca, 0x8e, 0x47, 0x70, 0xe9, - 0xd2, 0xc4, 0x27, 0x71, 0xc7, 0x92, 0x25, 0x0b, 0xd3, 0xc8, 0xf8, 0x04, 0xbe, 0x81, 0xb9, 0x77, - 0x2e, 0x4c, 0x81, 0x68, 0x61, 0x37, 0xf7, 0x3b, 0xe7, 0xfb, 0xce, 0x39, 0xdf, 0xe9, 0x29, 0x7a, - 0x3e, 0x5c, 0x4f, 0x88, 0x1f, 0xd9, 0xc3, 0xb4, 0x07, 0x3c, 0x04, 0x01, 0x89, 0x3d, 0x82, 0xb0, - 
0x1f, 0x71, 0x5b, 0x07, 0x58, 0xec, 0xdb, 0x2c, 0x15, 0x03, 0x08, 0x85, 0xef, 0x32, 0xe1, 0x47, - 0xa1, 0x3d, 0x5a, 0xee, 0x81, 0x60, 0xcb, 0xb6, 0x07, 0x21, 0x70, 0x26, 0xa0, 0x4f, 0x62, 0x1e, - 0x89, 0x08, 0xcf, 0xe7, 0x14, 0xc2, 0x62, 0x9f, 0x5c, 0xa6, 0x10, 0x4d, 0x69, 0x2f, 0x79, 0xbe, - 0x18, 0xa4, 0x3d, 0xe2, 0x46, 0x81, 0xed, 0x45, 0x5e, 0x64, 0x2b, 0x66, 0x2f, 0x3d, 0x50, 0x2f, - 0xf5, 0x50, 0x5f, 0xb9, 0x62, 0x7b, 0xb5, 0x68, 0x22, 0x60, 0xee, 0xc0, 0x0f, 0x81, 0x1f, 0xda, - 0xf1, 0xd0, 0x93, 0x40, 0x62, 0x07, 0x20, 0x98, 0x3d, 0xba, 0xd6, 0x47, 0xdb, 0xfe, 0x1b, 0x8b, - 0xa7, 0xa1, 0xf0, 0x03, 0xb8, 0x46, 0x58, 0x9b, 0x46, 0x48, 0xdc, 0x01, 0x04, 0xec, 0x2a, 0xaf, - 0xfb, 0x18, 0xa1, 0xad, 0x4f, 0x82, 0xb3, 0x57, 0xec, 0x43, 0x0a, 0xb8, 0x83, 0xaa, 0xbe, 0x80, - 0x20, 0x69, 0x99, 0x73, 0xe5, 0x85, 0xba, 0x53, 0xcf, 0xc6, 0x9d, 0xea, 0x8e, 0x04, 0x68, 0x8e, - 0x6f, 0xd4, 0xbe, 0x7c, 0xed, 0x18, 0x47, 0x3f, 0xe6, 0x8c, 0xee, 0xb7, 0x12, 0x6a, 0xec, 0x45, - 0x43, 0x08, 0x29, 0x8c, 0x7c, 0xf8, 0x88, 0xdf, 0xa1, 0x9a, 0x1c, 0xa6, 0xcf, 0x04, 0x6b, 0x99, - 0x73, 0xe6, 0x42, 0x63, 0xe5, 0x11, 0x29, 0xcc, 0xbc, 0xe8, 0x89, 0xc4, 0x43, 0x4f, 0x02, 0x09, - 0x91, 0xd9, 0x64, 0xb4, 0x4c, 0x5e, 0xf4, 0xde, 0x83, 0x2b, 0x76, 0x41, 0x30, 0x07, 0x1f, 0x8f, - 0x3b, 0x46, 0x36, 0xee, 0xa0, 0x02, 0xa3, 0x17, 0xaa, 0x78, 0x0f, 0x55, 0x92, 0x18, 0xdc, 0x56, - 0x49, 0xa9, 0xaf, 0x90, 0xa9, 0xab, 0x22, 0x13, 0xfd, 0xbd, 0x8c, 0xc1, 0x75, 0xfe, 0xd3, 0xfa, - 0x15, 0xf9, 0xa2, 0x4a, 0x0d, 0xbf, 0x45, 0x33, 0x89, 0x60, 0x22, 0x4d, 0x5a, 0x65, 0xa5, 0xbb, - 0x7a, 0x4b, 0x5d, 0xc5, 0x75, 0xfe, 0xd7, 0xca, 0x33, 0xf9, 0x9b, 0x6a, 0xcd, 0xae, 0x8b, 0xee, - 0x5c, 0x69, 0x02, 0x3f, 0x40, 0x55, 0x21, 0x21, 0xe5, 0x52, 0xdd, 0x69, 0x6a, 0x66, 0x35, 0xcf, - 0xcb, 0x63, 0x78, 0x11, 0xd5, 0x59, 0xda, 0xf7, 0x21, 0x74, 0x21, 0x69, 0x95, 0xd4, 0x32, 0x9a, - 0xd9, 0xb8, 0x53, 0xdf, 0x3c, 0x07, 0x69, 0x11, 0xef, 0xfe, 0x36, 0xd1, 0xdd, 0x6b, 0x2d, 0xe1, - 0x27, 0xa8, 0x39, 0xd1, 0x3e, 0xf4, 0x55, 0xbd, 0x9a, 0x73, 0x4f, 0xd7, 0x6b, 0x6e, 0x4e, 0x06, - 0xe9, 0xe5, 0x5c, 0xbc, 0x8b, 0x2a, 0x69, 0x02, 0x5c, 0x7b, 0xbd, 0x78, 0x03, 0x4f, 0xf6, 0x13, - 0xe0, 0x3b, 0xe1, 0x41, 0x54, 0x98, 0x2c, 0x11, 0xaa, 0x64, 0x2e, 0x8f, 0x53, 0xf9, 0xf7, 0x38, - 0xd2, 0x20, 0xe0, 0x3c, 0xe2, 0x6a, 0x21, 0x13, 0x06, 0x6d, 0x49, 0x90, 0xe6, 0xb1, 0xee, 0xf7, - 0x12, 0xaa, 0x9d, 0x97, 0xc4, 0x0f, 0x51, 0x4d, 0x96, 0x09, 0x59, 0x00, 0xda, 0xd5, 0x59, 0x4d, - 0x52, 0x39, 0x12, 0xa7, 0x17, 0x19, 0xf8, 0x3e, 0x2a, 0xa7, 0x7e, 0x5f, 0x8d, 0x56, 0x77, 0x1a, - 0x3a, 0xb1, 0xbc, 0xbf, 0xf3, 0x8c, 0x4a, 0x1c, 0x77, 0xd1, 0x8c, 0xc7, 0xa3, 0x34, 0x96, 0x3f, - 0x08, 0xd9, 0x28, 0x92, 0x6b, 0xdd, 0x56, 0x08, 0xd5, 0x11, 0xfc, 0x06, 0x55, 0x41, 0x5e, 0x8d, - 0x9a, 0xa5, 0xb1, 0xb2, 0x76, 0x0b, 0x7f, 0x88, 0x3a, 0xb7, 0xad, 0x50, 0xf0, 0xc3, 0x89, 0xd1, - 0x24, 0x46, 0x73, 0xcd, 0xb6, 0xa7, 0x4f, 0x52, 0xe5, 0xe0, 0x59, 0x54, 0x1e, 0xc2, 0x61, 0x3e, - 0x16, 0x95, 0x9f, 0xf8, 0x29, 0xaa, 0x8e, 0xe4, 0xb5, 0xea, 0xe5, 0x2c, 0xdd, 0xa0, 0x78, 0x71, - 0xe2, 0x34, 0xe7, 0x6e, 0x94, 0xd6, 0x4d, 0x67, 0xfb, 0xf8, 0xcc, 0x32, 0x4e, 0xce, 0x2c, 0xe3, - 0xf4, 0xcc, 0x32, 0x8e, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x24, 0xb3, 0xcc, 0xd3, 0xcc, 0x32, - 0x7f, 0x66, 0x96, 0xf9, 0xf9, 0x97, 0x65, 0xbc, 0x9e, 0x9f, 0xfa, 0x2f, 0xfa, 0x27, 0x00, 0x00, - 0xff, 0xff, 0xb8, 0x72, 0x2c, 0x2c, 0x82, 0x05, 0x00, 0x00, + // 725 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4f, 0x4f, 0x13, 0x41, + 0x14, 0xef, 
0xf6, 0x0f, 0x69, 0xa7, 0x56, 0x61, 0x12, 0x23, 0x69, 0xe2, 0x16, 0x6a, 0x62, 0x48, + 0x80, 0x59, 0x21, 0x04, 0x09, 0x9e, 0x58, 0x25, 0x04, 0x13, 0x62, 0x32, 0x05, 0x0f, 0xea, 0xc1, + 0xe9, 0xf6, 0xb1, 0x5d, 0x4b, 0x77, 0x37, 0xbb, 0xb3, 0x55, 0x6e, 0x7c, 0x04, 0x8f, 0x1e, 0x4d, + 0xfc, 0x24, 0xde, 0x38, 0x72, 0xc4, 0xc4, 0x34, 0xb2, 0x7e, 0x02, 0xbf, 0x81, 0x99, 0xd9, 0x61, + 0xdb, 0x82, 0x14, 0xb8, 0x78, 0xdb, 0xf9, 0xcd, 0xfb, 0xfd, 0xde, 0x7b, 0xbf, 0xf7, 0x32, 0x8b, + 0x5e, 0x76, 0xd6, 0x42, 0xe2, 0x78, 0x46, 0x27, 0x6a, 0x42, 0xe0, 0x02, 0x87, 0xd0, 0xe8, 0x81, + 0xdb, 0xf2, 0x02, 0x43, 0x5d, 0x30, 0xdf, 0x31, 0x58, 0xc4, 0xdb, 0xe0, 0x72, 0xc7, 0x62, 0xdc, + 0xf1, 0x5c, 0xa3, 0xb7, 0xd4, 0x04, 0xce, 0x96, 0x0c, 0x1b, 0x5c, 0x08, 0x18, 0x87, 0x16, 0xf1, + 0x03, 0x8f, 0x7b, 0x78, 0x36, 0xa1, 0x10, 0xe6, 0x3b, 0x64, 0x94, 0x42, 0x14, 0xa5, 0xba, 0x68, + 0x3b, 0xbc, 0x1d, 0x35, 0x89, 0xe5, 0x75, 0x0d, 0xdb, 0xb3, 0x3d, 0x43, 0x32, 0x9b, 0xd1, 0xbe, + 0x3c, 0xc9, 0x83, 0xfc, 0x4a, 0x14, 0xab, 0x0b, 0xe3, 0x8a, 0xb8, 0x98, 0xbf, 0xba, 0x32, 0x88, + 0xee, 0x32, 0xab, 0xed, 0xb8, 0x10, 0x1c, 0x1a, 0x7e, 0xc7, 0x16, 0x40, 0x68, 0x74, 0x81, 0xb3, + 0x7f, 0xb1, 0x8c, 0xab, 0x58, 0x41, 0xe4, 0x72, 0xa7, 0x0b, 0x97, 0x08, 0xab, 0xd7, 0x11, 0x42, + 0xab, 0x0d, 0x5d, 0x76, 0x91, 0x57, 0x7f, 0x8a, 0xd0, 0xe6, 0x27, 0x1e, 0xb0, 0xd7, 0xec, 0x20, + 0x02, 0x5c, 0x43, 0x05, 0x87, 0x43, 0x37, 0x9c, 0xd6, 0x66, 0x72, 0x73, 0x25, 0xb3, 0x14, 0xf7, + 0x6b, 0x85, 0x6d, 0x01, 0xd0, 0x04, 0x5f, 0x2f, 0x7e, 0xf9, 0x5a, 0xcb, 0x1c, 0xfd, 0x9c, 0xc9, + 0xd4, 0x7f, 0x68, 0x68, 0xaa, 0x01, 0x07, 0xfb, 0x8d, 0xa8, 0xf9, 0x01, 0x2c, 0x4e, 0xa1, 0xe7, + 0xc0, 0x47, 0xfc, 0x1e, 0x15, 0x45, 0x4b, 0x2d, 0xc6, 0xd9, 0xb4, 0x36, 0xa3, 0xcd, 0x95, 0x97, + 0x9f, 0x90, 0xc1, 0x00, 0xd2, 0xca, 0x88, 0xdf, 0xb1, 0x05, 0x10, 0x12, 0x11, 0x4d, 0x7a, 0x4b, + 0xe4, 0x95, 0x54, 0xd9, 0x01, 0xce, 0x4c, 0x7c, 0xdc, 0xaf, 0x65, 0xe2, 0x7e, 0x0d, 0x0d, 0x30, + 0x9a, 0xaa, 0xe2, 0x26, 0x9a, 0x08, 0x39, 0xe3, 0x51, 0x38, 0x9d, 0x95, 0xfa, 0xeb, 0xe4, 0xda, + 0x01, 0x93, 0x4b, 0x75, 0x36, 0xa4, 0x82, 0x79, 0x57, 0x65, 0x9a, 0x48, 0xce, 0x54, 0x29, 0xd7, + 0x3d, 0xf4, 0xe0, 0x0a, 0x0a, 0xde, 0x45, 0xc5, 0x28, 0x84, 0x60, 0xdb, 0xdd, 0xf7, 0x54, 0x83, + 0x8f, 0xc7, 0x16, 0x40, 0xf6, 0x54, 0xb4, 0x39, 0xa9, 0x92, 0x15, 0xcf, 0x11, 0x9a, 0x2a, 0xd5, + 0xbf, 0x65, 0x51, 0x79, 0xd7, 0xeb, 0x80, 0xfb, 0xdf, 0x6c, 0xdc, 0x45, 0xf9, 0xd0, 0x07, 0x4b, + 0x99, 0xb8, 0x7c, 0x03, 0x13, 0x87, 0xea, 0x6b, 0xf8, 0x60, 0x99, 0x77, 0x94, 0x7e, 0x5e, 0x9c, + 0xa8, 0x54, 0xc3, 0xef, 0xd2, 0xe1, 0xe4, 0xa4, 0xee, 0xca, 0x2d, 0x75, 0xc7, 0x8f, 0xc5, 0x42, + 0xf7, 0x2e, 0x14, 0x81, 0x1f, 0xa1, 0x02, 0x17, 0x90, 0x74, 0xa9, 0x64, 0x56, 0x14, 0xb3, 0x90, + 0xc4, 0x25, 0x77, 0x78, 0x1e, 0x95, 0x58, 0xd4, 0x72, 0xc0, 0xb5, 0x40, 0x6c, 0x8d, 0xd8, 0xec, + 0x4a, 0xdc, 0xaf, 0x95, 0x36, 0xce, 0x41, 0x3a, 0xb8, 0xaf, 0xff, 0xd1, 0xd0, 0xd4, 0xa5, 0x92, + 0xf0, 0x33, 0x54, 0x19, 0x2a, 0x1f, 0x5a, 0x32, 0x5f, 0xd1, 0xbc, 0xaf, 0xf2, 0x55, 0x36, 0x86, + 0x2f, 0xe9, 0x68, 0x2c, 0xde, 0x41, 0x79, 0x31, 0x69, 0xe5, 0xf5, 0xfc, 0x0d, 0x3c, 0x49, 0x97, + 0x26, 0x35, 0x59, 0x20, 0x54, 0xca, 0x8c, 0xb6, 0x93, 0x1f, 0xdf, 0x8e, 0x30, 0x08, 0x82, 0xc0, + 0x0b, 0xe4, 0x40, 0x86, 0x0c, 0xda, 0x14, 0x20, 0x4d, 0xee, 0xea, 0xdf, 0xb3, 0x28, 0xdd, 0x4a, + 0xbc, 0x90, 0x6c, 0xb8, 0xcb, 0xba, 0xa0, 0x5c, 0x1d, 0xd9, 0x5c, 0x81, 0xd3, 0x34, 0x02, 0x3f, + 0x44, 0xb9, 0xc8, 0x69, 0xc9, 0xd6, 0x4a, 0x66, 0x59, 0x05, 0xe6, 0xf6, 0xb6, 0x5f, 0x50, 0x81, + 0xe3, 0x3a, 0x9a, 0xb0, 0x03, 0x2f, 
0xf2, 0xc5, 0x42, 0x88, 0x42, 0x91, 0x18, 0xeb, 0x96, 0x44, + 0xa8, 0xba, 0xc1, 0x6f, 0x51, 0x01, 0xc4, 0x13, 0x24, 0x7b, 0x29, 0x2f, 0xaf, 0xde, 0xc2, 0x1f, + 0x22, 0xdf, 0xae, 0x4d, 0x97, 0x07, 0x87, 0x43, 0xad, 0x09, 0x8c, 0x26, 0x9a, 0x55, 0x5b, 0xbd, + 0x6f, 0x32, 0x06, 0x4f, 0xa2, 0x5c, 0x07, 0x0e, 0x93, 0xb6, 0xa8, 0xf8, 0xc4, 0xcf, 0x51, 0xa1, + 0x27, 0x9e, 0x3e, 0x35, 0x9c, 0xc5, 0x1b, 0x24, 0x1f, 0xbc, 0x97, 0x34, 0xe1, 0xae, 0x67, 0xd7, + 0x34, 0x73, 0xeb, 0xf8, 0x4c, 0xcf, 0x9c, 0x9c, 0xe9, 0x99, 0xd3, 0x33, 0x3d, 0x73, 0x14, 0xeb, + 0xda, 0x71, 0xac, 0x6b, 0x27, 0xb1, 0xae, 0x9d, 0xc6, 0xba, 0xf6, 0x2b, 0xd6, 0xb5, 0xcf, 0xbf, + 0xf5, 0xcc, 0x9b, 0xd9, 0x6b, 0x7f, 0x60, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x19, 0x49, + 0x3f, 0xfd, 0x06, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -275,6 +337,82 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -517,6 +655,30 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *SelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *TokenReview) Size() (n int) { if m == nil { return 0 @@ -603,6 +765,27 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *SelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReview{`, + `ObjectMeta:` 
+ strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReviewStatus{`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *TokenReview) String() string { if this == nil { return "nil" @@ -752,6 +935,205 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TokenReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index d1847a02..53b4635d 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.authentication.v1beta1; +import "k8s.io/api/authentication/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -37,6 +38,26 @@ message ExtraValue { repeated string items = 1; } +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +message SelfSubjectReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Status is filled in by the server with the user attributes. + optional SelfSubjectReviewStatus status = 2; +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +message SelfSubjectReviewStatus { + // User attributes of the user making this request. + // +optional + optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; +} + // TokenReview attempts to authenticate a token to a known user. // Note: TokenReview requests may be cached by the webhook token authenticator // plugin in the kube-apiserver. 
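The hunks above vendor the new SelfSubjectReview type into k8s.io/api/authentication/v1beta1 (promoted from v1alpha1; introduced in Kubernetes 1.27). For orientation only — this sketch is not part of the patch — the following minimal Go program shows how the new create-only resource can be exercised from a client. It assumes a client-go release aligned with these vendored k8s.io/api types (v0.27 or later) and a reachable cluster; the kubeconfig path is hypothetical.

// Illustrative sketch only; not part of the vendored diff above.
package main

import (
	"context"
	"fmt"

	authnv1beta1 "k8s.io/api/authentication/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig file (path is an assumption).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// SelfSubjectReview is create-only (+genclient:onlyVerbs=create); the
	// kube-apiserver fills Status.UserInfo with the calling user's attributes.
	review, err := clientset.AuthenticationV1beta1().SelfSubjectReviews().Create(
		context.TODO(), &authnv1beta1.SelfSubjectReview{}, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("authenticated as %q (groups: %v)\n",
		review.Status.UserInfo.Username, review.Status.UserInfo.Groups)
}

As the register.go hunk below shows, the type only becomes decodable because it is added to the scheme for this group/version; the client call above is simply the v1beta1 counterpart of the v1alpha1 SelfSubjectReview whose lifecycle bump appears earlier in this patch.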
diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go index ed23e50f..075ee126 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/register.go +++ b/vendor/k8s.io/api/authentication/v1beta1/register.go @@ -44,6 +44,7 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectReview{}, &TokenReview{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index 08e1e09b..5bce82e7 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -19,6 +19,7 @@ package v1beta1 import ( "fmt" + v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -113,3 +114,29 @@ type ExtraValue []string func (t ExtraValue) String() string { return fmt.Sprintf("%v", []string(t)) } + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +type SelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Status is filled in by the server with the user attributes. + Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +type SelfSubjectReviewStatus struct { + // User attributes of the user making this request. + // +optional + UserInfo v1.UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"` +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 1086955c..d6644f2c 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,28 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_SelfSubjectReview = map[string]string{ + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Status is filled in by the server with the user attributes.", +} + +func (SelfSubjectReview) SwaggerDoc() map[string]string { + return map_SelfSubjectReview +} + +var map_SelfSubjectReviewStatus = map[string]string{ + "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "userInfo": "User attributes of the user making this request.", +} + +func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string { + return map_SelfSubjectReviewStatus +} + var map_TokenReview = map[string]string{ "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 059ec1a8..99ffadf7 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -45,6 +45,50 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview. +func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview { + if in == nil { + return nil + } + out := new(SelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) { + *out = *in + in.UserInfo.DeepCopyInto(&out.UserInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus. +func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus { + if in == nil { + return nil + } + out := new(SelfSubjectReviewStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go index e448106e..90479692 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,24 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *SelfSubjectReview) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *SelfSubjectReview) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *TokenReview) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 2e5fbea7..93229485 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 2d291189..e0846be7 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_LocalSubjectAccessReview = map[string]string{ diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 8cf997a7..1dbafd1a 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -87,13 +87,13 @@ message ContainerResourceMetricStatus { // CrossVersionObjectReference contains enough information to let you identify the referred resource. // +structType=atomic message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -147,11 +147,11 @@ message HorizontalPodAutoscaler { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; - // current information about the autoscaler. + // status is the current information about the autoscaler. // +optional optional HorizontalPodAutoscalerStatus status = 3; } @@ -186,7 +186,7 @@ message HorizontalPodAutoscalerList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // list of horizontal pod autoscaler objects. + // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; } @@ -204,10 +204,10 @@ message HorizontalPodAutoscalerSpec { // +optional optional int32 minReplicas = 2; - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. optional int32 maxReplicas = 3; - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. // +optional optional int32 targetCPUUtilizationPercentage = 4; @@ -215,22 +215,22 @@ message HorizontalPodAutoscalerSpec { // current status of a horizontal pod autoscaler message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. + // observedGeneration is the most recent generation observed by this autoscaler. // +optional optional int64 observedGeneration = 1; - // last time the HorizontalPodAutoscaler scaled the number of pods; + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. 
// +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; - // current number of replicas of pods managed by this autoscaler. + // currentReplicas is the current number of replicas of pods managed by this autoscaler. optional int32 currentReplicas = 3; - // desired number of replicas of pods managed by this autoscaler. + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler. optional int32 desiredReplicas = 4; - // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. // +optional optional int32 currentCPUUtilizationPercentage = 5; @@ -264,7 +264,7 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod of the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -309,7 +309,7 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -464,31 +464,31 @@ message Scale { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } // ScaleSpec describes the attributes of a scale subresource. message ScaleSpec { - // desired number of instances for the scaled object. + // replicas is the desired number of instances for the scaled object. // +optional optional int32 replicas = 1; } // ScaleStatus represents the current status of a scale subresource. message ScaleStatus { - // actual number of observed instances of the scaled object. + // replicas is the actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. This is same + // selector is the label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. 
- // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional optional string selector = 2; } diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 6397430a..45082901 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -25,11 +25,13 @@ import ( // CrossVersionObjectReference contains enough information to let you identify the referred resource. // +structType=atomic type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -46,9 +48,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + + // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + + // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. // +optional TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` @@ -56,22 +60,22 @@ type HorizontalPodAutoscalerSpec struct { // current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. + // observedGeneration is the most recent generation observed by this autoscaler. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // last time the HorizontalPodAutoscaler scaled the number of pods; + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - // current number of replicas of pods managed by this autoscaler. + // currentReplicas is the current number of replicas of pods managed by this autoscaler. CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - // desired number of replicas of pods managed by this autoscaler. 
+ // desiredReplicas is the desired number of replicas of pods managed by this autoscaler. DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. // +optional CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` @@ -87,11 +91,11 @@ type HorizontalPodAutoscaler struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current information about the autoscaler. + // status is the current information about the autoscaler. // +optional Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -105,7 +109,7 @@ type HorizontalPodAutoscalerList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // list of horizontal pod autoscaler objects. + // items is the list of horizontal pod autoscaler objects. Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -118,31 +122,31 @@ type Scale struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ScaleSpec describes the attributes of a scale subresource. type ScaleSpec struct { - // desired number of instances for the scaled object. + // replicas is the desired number of instances for the scaled object. // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { - // actual number of observed instances of the scaled object. + // replicas is the actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. This is same + // selector is the label query over pods that should match the replicas count. 
This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` } @@ -194,11 +198,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -206,7 +212,8 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod of the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -214,6 +221,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -231,6 +239,7 @@ type ObjectMetricSource struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` @@ -239,6 +248,7 @@ type ObjectMetricSource struct { // When unset, just the metricName will be used to gather metrics. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional @@ -252,6 +262,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metricName is the name of the metric in question MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` @@ -273,11 +284,13 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. @@ -295,16 +308,19 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target. Container string `json:"container" protobuf:"bytes,5,opt,name=container"` } @@ -315,14 +331,17 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series // within a given metric. // +optional MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` + // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional @@ -341,11 +360,13 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -353,13 +374,15 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. 
// +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -390,15 +413,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -413,6 +440,7 @@ type ObjectMetricStatus struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` @@ -421,6 +449,7 @@ type ObjectMetricStatus struct { // When unset, just the metricName will be used to gather metrics. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional @@ -432,6 +461,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metricName is the name of the metric in question MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` @@ -451,6 +481,7 @@ type PodsMetricStatus struct { type ResourceMetricStatus struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. It will only be @@ -458,6 +489,7 @@ type ResourceMetricStatus struct { // specification. 
// +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. @@ -473,6 +505,7 @@ type ResourceMetricStatus struct { type ContainerResourceMetricStatus struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. It will only be @@ -480,11 +513,13 @@ type ContainerResourceMetricStatus struct { // specification. // +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling taget Container string `json:"container" protobuf:"bytes,4,opt,name=container"` } @@ -495,12 +530,14 @@ type ExternalMetricStatus struct { // metricName is the name of a metric used for autoscaling in // metric system. MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series // within a given metric. // +optional MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` // currentValue is the current value of the metric (as a quantity) CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index ca288e91..37c2b36a 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_ContainerResourceMetricSource = map[string]string{ @@ -53,9 +53,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -89,8 +89,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current information about the autoscaler.", + "spec": "spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", } func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { @@ -113,7 +113,7 @@ func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerList = map[string]string{ "": "list of horizontal pod autoscaler objects.", "metadata": "Standard list metadata.", - "items": "list of horizontal pod autoscaler objects.", + "items": "items is the list of horizontal pod autoscaler objects.", } func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { @@ -124,8 +124,8 @@ var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "specification of a horizontal pod autoscaler.", "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. 
Scaling is active as long as at least one metric value is available.", - "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", - "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", + "maxReplicas": "maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "targetCPUUtilizationPercentage": "targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", } func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { @@ -134,11 +134,11 @@ func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerStatus = map[string]string{ "": "current status of a horizontal pod autoscaler", - "observedGeneration": "most recent generation observed by this autoscaler.", - "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "current number of replicas of pods managed by this autoscaler.", - "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", - "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is the current number of replicas of pods managed by this autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler.", + "currentCPUUtilizationPercentage": "currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", } func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { @@ -151,7 +151,7 @@ var map_MetricSpec = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -165,7 +165,7 @@ var map_MetricStatus = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -246,8 +246,8 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "spec": "spec defines the behavior of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -256,7 +256,7 @@ func (Scale) SwaggerDoc() map[string]string { var map_ScaleSpec = map[string]string{ "": "ScaleSpec describes the attributes of a scale subresource.", - "replicas": "desired number of instances for the scaled object.", + "replicas": "replicas is the desired number of instances for the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -265,8 +265,8 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "replicas": "replicas is the actual number of observed instances of the scaled object.", + "selector": "selector is the label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", } func (ScaleStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto index c0832802..a9e36975 100644 --- a/vendor/k8s.io/api/autoscaling/v2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -54,25 +54,25 @@ message ContainerResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ContainerResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric optional MetricValueStatus current = 2; - // Container is the name of the container in the pods of the scaling target + // container is the name of the container in the pods of the scaling target optional string container = 3; } // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -100,14 +100,14 @@ message ExternalMetricStatus { // HPAScalingPolicy is a single policy which must hold true for a specified past interval. message HPAScalingPolicy { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. optional string type = 1; - // Value contains the amount of change which is permitted by the policy. + // value contains the amount of change which is permitted by the policy. // It must be greater than zero optional int32 value = 2; - // PeriodSeconds specifies the window of time for which the policy should hold true. + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). optional int32 periodSeconds = 3; } @@ -119,7 +119,7 @@ message HPAScalingPolicy { // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -495,7 +495,7 @@ message ResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go index 9b2dc36e..c12a83df 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2/types.go @@ -59,9 +59,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less that minReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). The desired replica count is calculated multiplying the @@ -83,11 +85,13 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
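The v2 spec hunks above document minReplicas, maxReplicas, and the metrics list alongside the CrossVersionObjectReference used as the scale target. A minimal sketch of how those fields compose, assuming the k8s.io/api/autoscaling/v2 package; the Deployment name is hypothetical:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	minReplicas := int32(2)
	avgCPU := int32(60)
	spec := autoscalingv2.HorizontalPodAutoscalerSpec{
		// The scale target is identified by a CrossVersionObjectReference.
		ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
			Kind:       "Deployment",
			Name:       "example-app", // hypothetical workload name
			APIVersion: "apps/v1",
		},
		MinReplicas: &minReplicas,
		MaxReplicas: 10, // cannot be less than minReplicas
		// Metrics drive the desired replica count; the maximum across all metrics is used.
		Metrics: []autoscalingv2.MetricSpec{{
			Type: autoscalingv2.ResourceMetricSourceType,
			Resource: &autoscalingv2.ResourceMetricSource{
				Name: corev1.ResourceCPU,
				Target: autoscalingv2.MetricTarget{
					Type:               autoscalingv2.UtilizationMetricType,
					AverageUtilization: &avgCPU,
				},
			},
		}},
	}
	fmt.Printf("scale %s/%s between %d and %d replicas\n",
		spec.ScaleTargetRef.Kind, spec.ScaleTargetRef.Name, *spec.MinReplicas, spec.MaxReplicas)
}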
type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -105,11 +109,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -117,6 +123,7 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in // each pod of the current scale target (e.g. CPU or memory). Such metrics are @@ -125,6 +132,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -144,6 +152,7 @@ type HorizontalPodAutoscalerBehavior struct { // No stabilization is used. // +optional ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. // If not set, the default value is to allow to scale down to minReplicas pods, with a // 300 second stabilization window (i.e., the highest recommendation for @@ -171,7 +180,7 @@ const ( // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -179,10 +188,12 @@ type HPAScalingRules struct { // - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long). // +optional StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. // If not set, the default value Max is used. // +optional SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +listType=atomic @@ -203,12 +214,14 @@ const ( // HPAScalingPolicy is a single policy which must hold true for a specified past interval. type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. + + // value contains the amount of change which is permitted by the policy. // It must be greater than zero Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. + + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` } @@ -249,8 +262,10 @@ const ( type ObjectMetricSource struct { // describedObject specifies the descriptions of a object,such as kind,name apiVersion DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` } @@ -262,6 +277,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -276,6 +292,7 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -290,8 +307,10 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. 
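The HPAScalingRules and HPAScalingPolicy comments renamed in this hunk describe the behavior API: a stabilization window plus one or more policies per scaling direction. A minimal sketch, assuming the k8s.io/api/autoscaling/v2 package; the concrete numbers are illustrative only:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

func main() {
	window := int32(300) // stabilizationWindowSeconds must be between 0 and 3600
	behavior := autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleDown: &autoscalingv2.HPAScalingRules{
			StabilizationWindowSeconds: &window,
			// At least one policy is required, otherwise the rules are discarded as invalid.
			Policies: []autoscalingv2.HPAScalingPolicy{{
				Type:          autoscalingv2.PercentScalingPolicy,
				Value:         50, // must be greater than zero
				PeriodSeconds: 60, // must be greater than zero and at most 1800
			}},
		},
	}
	fmt.Printf("scale down: %ds window, %d policy(ies)\n",
		*behavior.ScaleDown.StabilizationWindowSeconds, len(behavior.ScaleDown.Policies))
}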
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -302,6 +321,7 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -310,6 +330,7 @@ type ExternalMetricSource struct { type MetricIdentifier struct { // name is the name of the given metric Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. @@ -321,13 +342,16 @@ type MetricIdentifier struct { type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. @@ -405,15 +429,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -432,11 +460,13 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). 
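MetricIdentifier and MetricTarget, whose field comments are touched above, combine in sources such as ExternalMetricSource. A minimal sketch using an AverageValue target, assuming the k8s.io/api/autoscaling/v2 and apimachinery packages; the metric name and label selector are hypothetical:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	avg := resource.MustParse("30")
	external := autoscalingv2.ExternalMetricSource{
		// metric identifies the target metric by name and selector.
		Metric: autoscalingv2.MetricIdentifier{
			Name: "queue_messages_ready", // hypothetical metric name
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"queue": "orders"}, // hypothetical labels
			},
		},
		// target uses AverageValue, so the metric is averaged over the autoscaled pods.
		Target: autoscalingv2.MetricTarget{
			Type:         autoscalingv2.AverageValueMetricType,
			AverageValue: &avg,
		},
	}
	fmt.Printf("external metric %q targets an average of %s per pod\n",
		external.Metric.Name, external.Target.AverageValue.String())
}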
Such metrics are built in to @@ -444,6 +474,7 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -451,6 +482,7 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -465,8 +497,10 @@ type MetricStatus struct { type ObjectMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + // DescribedObject specifies the descriptions of a object,such as kind,name apiVersion DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,3,name=describedObject"` } @@ -476,6 +510,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -486,8 +521,9 @@ type PodsMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -498,11 +534,13 @@ type ResourceMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ContainerResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - // Container is the name of the container in the pods of the scaling target + + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -511,6 +549,7 @@ type ContainerResourceMetricStatus struct { type ExternalMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -520,10 +559,12 @@ type MetricValueStatus struct { // value is the current value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go index 41ab32a4..1941b1ef 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -40,9 +40,9 @@ func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { var map_ContainerResourceMetricStatus = map[string]string{ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", - "container": "Container is the name of the container in the pods of the scaling target", + "container": "container is the name of the container in the pods of the scaling target", } func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { @@ -51,9 +51,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HPAScalingPolicy = map[string]string{ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "type is used to specify the scaling policy.", + "value": "value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", } func (HPAScalingPolicy) SwaggerDoc() map[string]string { @@ -93,7 +93,7 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string { var map_HPAScalingRules = map[string]string{ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long).", + "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.", "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", } @@ -288,7 +288,7 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 33d27a96..6b3d4152 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -89,7 +89,7 @@ message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; // API version of the referent diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index c1480ab3..84228407 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -26,7 +26,7 @@ import ( type CrossVersionObjectReference struct { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // API version of the referent // +optional diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 6f555487..d656ee41 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. 
// Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -54,7 +54,7 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "name": "Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "apiVersion": "API version of the referent", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 1bafbf6c..5b2fe944 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -54,25 +54,25 @@ message ContainerResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ContainerResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric optional MetricValueStatus current = 2; - // Container is the name of the container in the pods of the scaling target + // container is the name of the container in the pods of the scaling target optional string container = 3; } // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -100,14 +100,14 @@ message ExternalMetricStatus { // HPAScalingPolicy is a single policy which must hold true for a specified past interval. message HPAScalingPolicy { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. optional string type = 1; - // Value contains the amount of change which is permitted by the policy. + // value contains the amount of change which is permitted by the policy. // It must be greater than zero optional int32 value = 2; - // PeriodSeconds specifies the window of time for which the policy should hold true. + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). 
optional int32 periodSeconds = 3; } @@ -119,7 +119,7 @@ message HPAScalingPolicy { // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -361,7 +361,7 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -411,7 +411,7 @@ message MetricValueStatus { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; - // currentAverageUtilization is the current value of the average of the + // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional @@ -485,7 +485,7 @@ message ResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 60da3ba0..b0b7681c 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -62,9 +62,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less that minReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). The desired replica count is calculated multiplying the @@ -85,11 +87,13 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -107,11 +111,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -119,6 +125,7 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in // each pod of the current scale target (e.g. CPU or memory). Such metrics are @@ -127,6 +134,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -146,6 +154,7 @@ type HorizontalPodAutoscalerBehavior struct { // No stabilization is used. // +optional ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. // If not set, the default value is to allow to scale down to minReplicas pods, with a // 300 second stabilization window (i.e., the highest recommendation for @@ -173,7 +182,7 @@ const ( // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -181,10 +190,12 @@ type HPAScalingRules struct { // - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long). // +optional StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. // If not set, the default value MaxPolicySelect is used. // +optional SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +optional @@ -204,12 +215,14 @@ const ( // HPAScalingPolicy is a single policy which must hold true for a specified past interval. type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. + + // value contains the amount of change which is permitted by the policy. // It must be greater than zero Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. + + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` } @@ -251,6 +264,7 @@ type ObjectMetricSource struct { DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` } @@ -262,6 +276,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -276,6 +291,7 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -290,8 +306,10 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -302,6 +320,7 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -310,6 +329,7 @@ type ExternalMetricSource struct { type MetricIdentifier struct { // name is the name of the given metric Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. @@ -321,13 +341,16 @@ type MetricIdentifier struct { type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. @@ -399,15 +422,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -426,6 +453,7 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. @@ -438,13 +466,15 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. 
// +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -459,6 +489,7 @@ type MetricStatus struct { type ObjectMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` @@ -470,6 +501,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -480,8 +512,9 @@ type PodsMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -492,11 +525,13 @@ type ResourceMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ContainerResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - // Container is the name of the container in the pods of the scaling target + + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -505,6 +540,7 @@ type ContainerResourceMetricStatus struct { type ExternalMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -514,11 +550,13 @@ type MetricValueStatus struct { // value is the current value of the metric (as a quantity). 
// +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` - // currentAverageUtilization is the current value of the average of the + + // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index cb92e9e3..4af7d0ec 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -40,9 +40,9 @@ func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { var map_ContainerResourceMetricStatus = map[string]string{ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", - "container": "Container is the name of the container in the pods of the scaling target", + "container": "container is the name of the container in the pods of the scaling target", } func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { @@ -51,9 +51,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HPAScalingPolicy = map[string]string{ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type is 
used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "type is used to specify the scaling policy.", + "value": "value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", } func (HPAScalingPolicy) SwaggerDoc() map[string]string { @@ -93,7 +93,7 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string { var map_HPAScalingRules = map[string]string{ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", + "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.", "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", } @@ -203,7 +203,7 @@ var map_MetricStatus = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). 
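The reworded HPAScalingPolicy and HPAScalingRules descriptions above are easier to follow next to a concrete value. Below is a minimal Go sketch, not part of this vendored patch, showing a conservative scale-down behavior built from the v2beta2 types documented here; the window, policy value, and period are invented example numbers.

package example

import autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"

// conservativeScaleDown returns a behavior that waits out a 300-second
// stabilization window and then removes at most one Pod per minute.
func conservativeScaleDown() *autoscalingv2beta2.HorizontalPodAutoscalerBehavior {
	window := int32(300) // must be between 0 and 3600 per the field comment
	return &autoscalingv2beta2.HorizontalPodAutoscalerBehavior{
		ScaleDown: &autoscalingv2beta2.HPAScalingRules{
			StabilizationWindowSeconds: &window,
			Policies: []autoscalingv2beta2.HPAScalingPolicy{{
				Type:          autoscalingv2beta2.PodsScalingPolicy,
				Value:         1,  // amount of change permitted per period; must be greater than zero
				PeriodSeconds: 60, // window for the policy; 1..1800 per the field comment
			}},
		},
	}
}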
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -227,7 +227,7 @@ var map_MetricValueStatus = map[string]string{ "": "MetricValueStatus holds the current value for a metric", "value": "value is the current value of the metric (as a quantity).", "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - "averageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "averageUtilization": "averageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", } func (MetricValueStatus) SwaggerDoc() map[string]string { @@ -286,7 +286,7 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", } diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 5e815973..a21b5f1b 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -72,7 +72,6 @@ message CronJobSpec { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional optional string timeZone = 8; @@ -83,6 +82,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -189,7 +189,7 @@ message JobSpec { optional int32 parallelism = 1; // Specifies the desired number of successfully finished pods the - // job should be run with. 
Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -243,6 +243,7 @@ message JobSpec { optional bool manualSelector = 5; // Describes the pod that will be created when executing a job. + // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ optional k8s.io.api.core.v1.PodTemplateSpec template = 6; @@ -256,7 +257,7 @@ message JobSpec { // +optional optional int32 ttlSecondsAfterFinished = 8; - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -281,7 +282,7 @@ message JobSpec { // +optional optional string completionMode = 9; - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -334,7 +335,7 @@ message JobStatus { // +optional optional int32 failed = 6; - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -344,15 +345,16 @@ message JobStatus { // +optional optional string completedIndexes = 7; - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. // // Old jobs might not be tracked using this field, in which case the field @@ -409,6 +411,7 @@ message PodFailurePolicyOnExitCodesRequirement { // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are // excluded from the requirement check. Possible values are: + // // - In: the requirement is satisfied if at least one container exit code // (might be multiple if there are multiple containers not restricted // by the 'containerName' field) is in the set of specified values. 
@@ -442,10 +445,11 @@ message PodFailurePolicyOnPodConditionsPattern { } // PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. -// One of OnExitCodes and onPodConditions, but not both, can be used in each rule. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. message PodFailurePolicyRule { // Specifies the action taken on a pod failure when the requirements are satisfied. // Possible values are: + // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. // - Ignore: indicates that the counter towards the .backoffLimit is not @@ -471,12 +475,12 @@ message PodFailurePolicyRule { // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. message UncountedTerminatedPods { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional repeated string succeeded = 1; - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. // +listType=set // +optional repeated string failed = 2; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index d298a02f..c12e91b0 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -23,8 +23,11 @@ import ( ) const ( - JobCompletionIndexAnnotation = "batch.kubernetes.io/job-completion-index" + // All Kubernetes labels need to be prefixed with Kubernetes to distinguish them from end-user labels + // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions + labelPrefix = "batch.kubernetes.io/" + JobCompletionIndexAnnotation = labelPrefix + "job-completion-index" // JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from // being deleted before being accounted in the Job status. // @@ -34,7 +37,14 @@ const ( // 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the // apiserver and job controller will ignore this annotation and they will // always track jobs using finalizers. - JobTrackingFinalizer = "batch.kubernetes.io/job-tracking" + JobTrackingFinalizer = labelPrefix + "job-tracking" + // The Job labels will use batch.kubernetes.io as a prefix for all labels + // Historically the job controller uses unprefixed labels for job-name and controller-uid and + // Kubernetes continutes to recognize those unprefixed labels for consistency. + JobNameLabel = labelPrefix + "job-name" + // ControllerUid is used to programatically get pods corresponding to a Job. + // There is a corresponding label without the batch.kubernetes.io that we support for legacy reasons. + ControllerUidLabel = labelPrefix + "controller-uid" ) // +genclient @@ -135,6 +145,7 @@ type PodFailurePolicyOnExitCodesRequirement struct { // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are // excluded from the requirement check. Possible values are: + // // - In: the requirement is satisfied if at least one container exit code // (might be multiple if there are multiple containers not restricted // by the 'containerName' field) is in the set of specified values. @@ -168,10 +179,11 @@ type PodFailurePolicyOnPodConditionsPattern struct { } // PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. 
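The prefixed label constants introduced above (JobNameLabel, ControllerUidLabel) give a stable way to find the Pods that belong to a Job. A hedged client-go sketch, not part of this vendored patch, with the clientset, namespace, and job name assumed to come from the caller:

package example

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// podsForJob lists the Pods labelled with the new batch.kubernetes.io/job-name
// key; the controller also keeps the legacy unprefixed label for compatibility.
func podsForJob(ctx context.Context, cs kubernetes.Interface, namespace, jobName string) (*corev1.PodList, error) {
	selector := fmt.Sprintf("%s=%s", batchv1.JobNameLabel, jobName)
	return cs.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
}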
-// One of OnExitCodes and onPodConditions, but not both, can be used in each rule. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. type PodFailurePolicyRule struct { // Specifies the action taken on a pod failure when the requirements are satisfied. // Possible values are: + // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. // - Ignore: indicates that the counter towards the .backoffLimit is not @@ -217,7 +229,7 @@ type JobSpec struct { Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -276,6 +288,7 @@ type JobSpec struct { ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` // Describes the pod that will be created when executing a job. + // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` @@ -289,7 +302,7 @@ type JobSpec struct { // +optional TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -314,7 +327,7 @@ type JobSpec struct { // +optional CompletionMode *CompletionMode `json:"completionMode,omitempty" protobuf:"bytes,9,opt,name=completionMode,casttype=CompletionMode"` - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -367,7 +380,7 @@ type JobStatus struct { // +optional Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -377,15 +390,16 @@ type JobStatus struct { // +optional CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. 
When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. // // Old jobs might not be tracked using this field, in which case the field @@ -404,12 +418,12 @@ type JobStatus struct { // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. type UncountedTerminatedPods struct { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional Succeeded []types.UID `json:"succeeded,omitempty" protobuf:"bytes,1,rep,name=succeeded,casttype=k8s.io/apimachinery/pkg/types.UID"` - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. // +listType=set // +optional Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"` @@ -514,7 +528,6 @@ type CronJobSpec struct { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional TimeZone *string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` @@ -525,6 +538,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index ab33a974..f6f3141f 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -51,9 +51,9 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. 
If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones This is beta field and must be enabled via the `CronJobTimeZone` feature gate.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3.", @@ -113,16 +113,16 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes how the job execution will look like.", "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", - "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", - "completionMode": "CompletionMode specifies how Pod completions are tracked. 
It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", - "suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", + "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", + "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", } func (JobSpec) SwaggerDoc() map[string]string { @@ -137,8 +137,8 @@ var map_JobStatus = map[string]string{ "active": "The number of pending and running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", - "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. 
The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "uncountedTerminatedPods": "UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", + "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", "ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", } @@ -168,7 +168,7 @@ func (PodFailurePolicy) SwaggerDoc() map[string]string { var map_PodFailurePolicyOnExitCodesRequirement = map[string]string{ "": "PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it lookups the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check.", "containerName": "Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template.", - "operator": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. 
Possible values are: - In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", + "operator": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are:\n\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", "values": "Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.", } @@ -187,8 +187,8 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { } var map_PodFailurePolicyRule = map[string]string{ - "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of OnExitCodes and onPodConditions, but not both, can be used in each rule.", - "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: - FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. 
Clients should react to an unknown action by skipping the rule.", "onExitCodes": "Represents the requirement on the container exit codes.", "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.", } @@ -199,8 +199,8 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string { var map_UncountedTerminatedPods = map[string]string{ "": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", - "succeeded": "Succeeded holds UIDs of succeeded Pods.", - "failed": "Failed holds UIDs of failed Pods.", + "succeeded": "succeeded holds UIDs of succeeded Pods.", + "failed": "failed holds UIDs of failed Pods.", } func (UncountedTerminatedPods) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index d042fc69..03feb2ce 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -157,38 +157,10 @@ func (m *CronJobStatus) XXX_DiscardUnknown() { var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{4} -} -func (m *JobTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobTemplate.Merge(m, src) -} -func (m *JobTemplate) XXX_Size() int { - return m.Size() -} -func (m *JobTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_JobTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_JobTemplate proto.InternalMessageInfo - func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } func (*JobTemplateSpec) ProtoMessage() {} func (*JobTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{5} + return fileDescriptor_e57b277b05179ae7, []int{4} } func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +190,6 @@ func init() { proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1beta1.CronJobList") proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v1beta1.CronJobSpec") proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v1beta1.CronJobStatus") - proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") } @@ -227,58 +198,57 @@ func init() { } var fileDescriptor_e57b277b05179ae7 = []byte{ - // 814 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0xbd, 0x4e, 0x9c, 0xb8, 0xe3, 0x16, 0xd2, 0x01, 0xa5, 0x2b, 0x83, 0xd6, 0xc1, 0x55, - 0x85, 0x41, 0x30, 0x4b, 0x22, 0x84, 0x38, 0x55, 0xea, 0x16, 0x15, 0x08, 0x41, 0x45, 0xe3, 0x22, - 0xa4, 0xaa, 0x42, 0x9d, 0x1d, 0xbf, 0x38, 0xd3, 0x78, 0x77, 0x56, 0x3b, 0xb3, 0x91, 0x72, 0xe3, - 0xc2, 0x9d, 0xef, 0xc2, 0x9d, 0x73, 0x8e, 0xbd, 0xd1, 0xd3, 0x8a, 0x2c, 0xdf, 0x82, 0x13, 0x9a, - 0xf1, 0x7a, 0xed, 
0xda, 0xeb, 0xa6, 0xbd, 0xf4, 0xe6, 0x79, 0xf3, 0xff, 0xff, 0xe6, 0xed, 0x7b, - 0x6f, 0x67, 0x8d, 0xee, 0x9d, 0x7e, 0xad, 0x88, 0x90, 0xfe, 0x69, 0x16, 0x42, 0x1a, 0x83, 0x06, - 0xe5, 0x9f, 0x41, 0x3c, 0x92, 0xa9, 0x5f, 0x6e, 0xb0, 0x44, 0xf8, 0x21, 0xd3, 0xfc, 0xc4, 0x3f, - 0xdb, 0x0f, 0x41, 0xb3, 0x7d, 0x7f, 0x0c, 0x31, 0xa4, 0x4c, 0xc3, 0x88, 0x24, 0xa9, 0xd4, 0x12, - 0xbb, 0x53, 0x25, 0x61, 0x89, 0x20, 0x56, 0x49, 0x4a, 0x65, 0xf7, 0xf3, 0xb1, 0xd0, 0x27, 0x59, - 0x48, 0xb8, 0x8c, 0xfc, 0xb1, 0x1c, 0x4b, 0xdf, 0x1a, 0xc2, 0xec, 0xd8, 0xae, 0xec, 0xc2, 0xfe, - 0x9a, 0x82, 0xba, 0xb7, 0x6b, 0x8e, 0x5c, 0x3e, 0xad, 0xdb, 0x5f, 0x10, 0x71, 0x99, 0x42, 0x9d, - 0xe6, 0xcb, 0xb9, 0x26, 0x62, 0xfc, 0x44, 0xc4, 0x90, 0x9e, 0xfb, 0xc9, 0xe9, 0xd8, 0x04, 0x94, - 0x1f, 0x81, 0x66, 0x75, 0x2e, 0x7f, 0x9d, 0x2b, 0xcd, 0x62, 0x2d, 0x22, 0x58, 0x31, 0x7c, 0x75, - 0x95, 0x41, 0xf1, 0x13, 0x88, 0xd8, 0xb2, 0xaf, 0xff, 0x7b, 0x13, 0x6d, 0xdf, 0x4f, 0x65, 0x7c, - 0x28, 0x43, 0xfc, 0x14, 0xb5, 0x4d, 0x3e, 0x23, 0xa6, 0x99, 0xeb, 0xec, 0x39, 0x83, 0xce, 0xc1, - 0x17, 0x64, 0x5e, 0xcf, 0x0a, 0x4b, 0x92, 0xd3, 0xb1, 0x09, 0x28, 0x62, 0xd4, 0xe4, 0x6c, 0x9f, - 0x3c, 0x0c, 0x9f, 0x01, 0xd7, 0x3f, 0x82, 0x66, 0x01, 0xbe, 0xc8, 0x7b, 0x8d, 0x22, 0xef, 0xa1, - 0x79, 0x8c, 0x56, 0x54, 0xfc, 0x2d, 0xda, 0x54, 0x09, 0x70, 0xb7, 0x69, 0xe9, 0x77, 0xc8, 0xba, - 0x6e, 0x91, 0x32, 0xa5, 0x61, 0x02, 0x3c, 0xb8, 0x5e, 0x22, 0x37, 0xcd, 0x8a, 0x5a, 0x00, 0x7e, - 0x88, 0xb6, 0x94, 0x66, 0x3a, 0x53, 0xee, 0x86, 0x45, 0x7d, 0x7c, 0x35, 0xca, 0xca, 0x83, 0x77, - 0x4a, 0xd8, 0xd6, 0x74, 0x4d, 0x4b, 0x4c, 0xff, 0x4f, 0x07, 0x75, 0x4a, 0xe5, 0x91, 0x50, 0x1a, - 0x3f, 0x59, 0xa9, 0x05, 0x79, 0xbd, 0x5a, 0x18, 0xb7, 0xad, 0xc4, 0x4e, 0x79, 0x52, 0x7b, 0x16, - 0x59, 0xa8, 0xc3, 0x03, 0xd4, 0x12, 0x1a, 0x22, 0xe5, 0x36, 0xf7, 0x36, 0x06, 0x9d, 0x83, 0x8f, - 0xae, 0xcc, 0x3e, 0xb8, 0x51, 0xd2, 0x5a, 0xdf, 0x1b, 0x1f, 0x9d, 0xda, 0xfb, 0x7f, 0x6f, 0x56, - 0x59, 0x9b, 0xe2, 0xe0, 0xcf, 0x50, 0xdb, 0xf4, 0x79, 0x94, 0x4d, 0xc0, 0x66, 0x7d, 0x6d, 0x9e, - 0xc5, 0xb0, 0x8c, 0xd3, 0x4a, 0x81, 0x07, 0xa8, 0x6d, 0x46, 0xe3, 0xb1, 0x8c, 0xc1, 0x6d, 0x5b, - 0xf5, 0x75, 0xa3, 0x7c, 0x54, 0xc6, 0x68, 0xb5, 0x8b, 0x7f, 0x46, 0xb7, 0x94, 0x66, 0xa9, 0x16, - 0xf1, 0xf8, 0x1b, 0x60, 0xa3, 0x89, 0x88, 0x61, 0x08, 0x5c, 0xc6, 0x23, 0x65, 0x5b, 0xb9, 0x11, - 0x7c, 0x50, 0xe4, 0xbd, 0x5b, 0xc3, 0x7a, 0x09, 0x5d, 0xe7, 0xc5, 0x4f, 0xd0, 0x4d, 0x2e, 0x63, - 0x9e, 0xa5, 0x29, 0xc4, 0xfc, 0xfc, 0x27, 0x39, 0x11, 0xfc, 0xdc, 0x36, 0xf4, 0x5a, 0x40, 0xca, - 0xbc, 0x6f, 0xde, 0x5f, 0x16, 0xfc, 0x57, 0x17, 0xa4, 0xab, 0x20, 0x7c, 0x07, 0x6d, 0xab, 0x4c, - 0x25, 0x10, 0x8f, 0xdc, 0xcd, 0x3d, 0x67, 0xd0, 0x0e, 0x3a, 0x45, 0xde, 0xdb, 0x1e, 0x4e, 0x43, - 0x74, 0xb6, 0x87, 0x9f, 0xa2, 0xce, 0x33, 0x19, 0x3e, 0x82, 0x28, 0x99, 0x30, 0x0d, 0x6e, 0xcb, - 0x36, 0xfb, 0x93, 0xf5, 0x1d, 0x39, 0x9c, 0x8b, 0xed, 0x78, 0xbe, 0x57, 0x66, 0xda, 0x59, 0xd8, - 0xa0, 0x8b, 0x48, 0xfc, 0x2b, 0xea, 0xaa, 0x8c, 0x73, 0x50, 0xea, 0x38, 0x9b, 0x1c, 0xca, 0x50, - 0x7d, 0x27, 0x94, 0x96, 0xe9, 0xf9, 0x91, 0x88, 0x84, 0x76, 0xb7, 0xf6, 0x9c, 0x41, 0x2b, 0xf0, - 0x8a, 0xbc, 0xd7, 0x1d, 0xae, 0x55, 0xd1, 0x57, 0x10, 0x30, 0x45, 0xbb, 0xc7, 0x4c, 0x4c, 0x60, - 0xb4, 0xc2, 0xde, 0xb6, 0xec, 0x6e, 0x91, 0xf7, 0x76, 0x1f, 0xd4, 0x2a, 0xe8, 0x1a, 0x67, 0xff, - 0xaf, 0x26, 0xba, 0xf1, 0xd2, 0x9b, 0x83, 0x7f, 0x40, 0x5b, 0x8c, 0x6b, 0x71, 0x66, 0x26, 0xcb, - 0x0c, 0xed, 0xed, 0xc5, 0x12, 0x99, 0xdb, 0x6f, 0x7e, 0x13, 0x50, 0x38, 0x06, 0xd3, 0x09, 0x98, - 0xbf, 0x6e, 0xf7, 0xac, 0x95, 0x96, 0x08, 
0x3c, 0x41, 0x3b, 0x13, 0xa6, 0xf4, 0x6c, 0x28, 0xcd, - 0xc8, 0xd9, 0x26, 0x75, 0x0e, 0x3e, 0x7d, 0xbd, 0xd7, 0xcc, 0x38, 0x82, 0xf7, 0x8b, 0xbc, 0xb7, - 0x73, 0xb4, 0xc4, 0xa1, 0x2b, 0x64, 0x9c, 0x22, 0x6c, 0x63, 0x55, 0x09, 0xed, 0x79, 0xad, 0x37, - 0x3e, 0x6f, 0xb7, 0xc8, 0x7b, 0xf8, 0x68, 0x85, 0x44, 0x6b, 0xe8, 0xfd, 0x0b, 0x07, 0x2d, 0x4e, - 0xc4, 0x5b, 0xb8, 0x5c, 0x7f, 0x41, 0x6d, 0x3d, 0x9b, 0xe2, 0xe6, 0x9b, 0x4e, 0x71, 0x75, 0x4f, - 0x54, 0x23, 0x5c, 0xc1, 0xcc, 0xdd, 0xf8, 0xee, 0x92, 0xfe, 0x2d, 0x3c, 0xce, 0xdd, 0x97, 0xbe, - 0x15, 0x1f, 0xd6, 0x3d, 0x0a, 0x79, 0xc5, 0x27, 0x22, 0xb8, 0x7b, 0x71, 0xe9, 0x35, 0x9e, 0x5f, - 0x7a, 0x8d, 0x17, 0x97, 0x5e, 0xe3, 0xb7, 0xc2, 0x73, 0x2e, 0x0a, 0xcf, 0x79, 0x5e, 0x78, 0xce, - 0x8b, 0xc2, 0x73, 0xfe, 0x29, 0x3c, 0xe7, 0x8f, 0x7f, 0xbd, 0xc6, 0x63, 0x77, 0xdd, 0x5f, 0x8b, - 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd7, 0xf2, 0x8b, 0xe9, 0x8e, 0x08, 0x00, 0x00, + // 787 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xc7, 0xbd, 0x49, 0x9c, 0xb8, 0xe3, 0x16, 0xd2, 0x01, 0xa5, 0x2b, 0x83, 0xd6, 0xc1, 0x55, + 0x85, 0x41, 0x30, 0x4b, 0x22, 0x84, 0x38, 0x55, 0xea, 0x16, 0x15, 0x08, 0x41, 0x45, 0xe3, 0x72, + 0xa9, 0x2a, 0xd4, 0xd9, 0xd9, 0x17, 0x67, 0x9a, 0xdd, 0x9d, 0xd5, 0xce, 0x6c, 0xa4, 0xdc, 0xb8, + 0x70, 0xe7, 0xbb, 0x70, 0xe7, 0x9c, 0x63, 0x6f, 0xf4, 0xb4, 0x22, 0xcb, 0xb7, 0xe0, 0x84, 0x66, + 0xbc, 0xb1, 0x5d, 0x7b, 0xdd, 0x84, 0x4b, 0x6f, 0x9e, 0x37, 0xff, 0xff, 0x6f, 0x9e, 0xde, 0x7b, + 0xfb, 0x8c, 0x1e, 0x9c, 0x7c, 0xad, 0x88, 0x90, 0xfe, 0x49, 0x11, 0x42, 0x9e, 0x82, 0x06, 0xe5, + 0x9f, 0x42, 0x1a, 0xc9, 0xdc, 0xaf, 0x2f, 0x58, 0x26, 0xfc, 0x90, 0x69, 0x7e, 0xec, 0x9f, 0xee, + 0x85, 0xa0, 0xd9, 0x9e, 0x3f, 0x86, 0x14, 0x72, 0xa6, 0x21, 0x22, 0x59, 0x2e, 0xb5, 0xc4, 0xee, + 0x44, 0x49, 0x58, 0x26, 0x88, 0x55, 0x92, 0x5a, 0xd9, 0xfb, 0x7c, 0x2c, 0xf4, 0x71, 0x11, 0x12, + 0x2e, 0x13, 0x7f, 0x2c, 0xc7, 0xd2, 0xb7, 0x86, 0xb0, 0x38, 0xb2, 0x27, 0x7b, 0xb0, 0xbf, 0x26, + 0xa0, 0xde, 0xdd, 0x86, 0x27, 0x17, 0x5f, 0xeb, 0x0d, 0xe6, 0x44, 0x5c, 0xe6, 0xd0, 0xa4, 0xf9, + 0x72, 0xa6, 0x49, 0x18, 0x3f, 0x16, 0x29, 0xe4, 0x67, 0x7e, 0x76, 0x32, 0x36, 0x01, 0xe5, 0x27, + 0xa0, 0x59, 0x93, 0xcb, 0x5f, 0xe5, 0xca, 0x8b, 0x54, 0x8b, 0x04, 0x96, 0x0c, 0x5f, 0x5d, 0x65, + 0x50, 0xfc, 0x18, 0x12, 0xb6, 0xe8, 0x1b, 0xfc, 0xb6, 0x86, 0xb6, 0x1e, 0xe6, 0x32, 0x3d, 0x90, + 0x21, 0x7e, 0x8e, 0x3a, 0x26, 0x9f, 0x88, 0x69, 0xe6, 0x3a, 0xbb, 0xce, 0xb0, 0xbb, 0xff, 0x05, + 0x99, 0xd5, 0x73, 0x8a, 0x25, 0xd9, 0xc9, 0xd8, 0x04, 0x14, 0x31, 0x6a, 0x72, 0xba, 0x47, 0x1e, + 0x87, 0x2f, 0x80, 0xeb, 0x1f, 0x41, 0xb3, 0x00, 0x9f, 0x97, 0xfd, 0x56, 0x55, 0xf6, 0xd1, 0x2c, + 0x46, 0xa7, 0x54, 0xfc, 0x2d, 0xda, 0x50, 0x19, 0x70, 0x77, 0xcd, 0xd2, 0xef, 0x91, 0x55, 0xdd, + 0x22, 0x75, 0x4a, 0xa3, 0x0c, 0x78, 0x70, 0xb3, 0x46, 0x6e, 0x98, 0x13, 0xb5, 0x00, 0xfc, 0x18, + 0x6d, 0x2a, 0xcd, 0x74, 0xa1, 0xdc, 0x75, 0x8b, 0xfa, 0xf8, 0x6a, 0x94, 0x95, 0x07, 0xef, 0xd4, + 0xb0, 0xcd, 0xc9, 0x99, 0xd6, 0x98, 0xc1, 0x1f, 0x0e, 0xea, 0xd6, 0xca, 0x43, 0xa1, 0x34, 0x7e, + 0xb6, 0x54, 0x0b, 0x72, 0xbd, 0x5a, 0x18, 0xb7, 0xad, 0xc4, 0x76, 0xfd, 0x52, 0xe7, 0x32, 0x32, + 0x57, 0x87, 0x47, 0xa8, 0x2d, 0x34, 0x24, 0xca, 0x5d, 0xdb, 0x5d, 0x1f, 0x76, 0xf7, 0x3f, 0xba, + 0x32, 0xfb, 0xe0, 0x56, 0x4d, 0x6b, 0x7f, 0x6f, 0x7c, 0x74, 0x62, 0x1f, 0xfc, 0xb5, 0x31, 0xcd, + 0xda, 0x14, 0x07, 0x7f, 0x86, 0x3a, 0xa6, 0xcf, 0x51, 0x11, 0x83, 0xcd, 0xfa, 0xc6, 0x2c, 0x8b, + 0x51, 0x1d, 0xa7, 0x53, 0x05, 
0x1e, 0xa2, 0x8e, 0x19, 0x8d, 0xa7, 0x32, 0x05, 0xb7, 0x63, 0xd5, + 0x37, 0x8d, 0xf2, 0x49, 0x1d, 0xa3, 0xd3, 0x5b, 0xfc, 0x33, 0xba, 0xa3, 0x34, 0xcb, 0xb5, 0x48, + 0xc7, 0xdf, 0x00, 0x8b, 0x62, 0x91, 0xc2, 0x08, 0xb8, 0x4c, 0x23, 0x65, 0x5b, 0xb9, 0x1e, 0x7c, + 0x50, 0x95, 0xfd, 0x3b, 0xa3, 0x66, 0x09, 0x5d, 0xe5, 0xc5, 0xcf, 0xd0, 0x6d, 0x2e, 0x53, 0x5e, + 0xe4, 0x39, 0xa4, 0xfc, 0xec, 0x27, 0x19, 0x0b, 0x7e, 0x66, 0x1b, 0x7a, 0x23, 0x20, 0x75, 0xde, + 0xb7, 0x1f, 0x2e, 0x0a, 0xfe, 0x6d, 0x0a, 0xd2, 0x65, 0x10, 0xbe, 0x87, 0xb6, 0x54, 0xa1, 0x32, + 0x48, 0x23, 0x77, 0x63, 0xd7, 0x19, 0x76, 0x82, 0x6e, 0x55, 0xf6, 0xb7, 0x46, 0x93, 0x10, 0xbd, + 0xbc, 0xc3, 0xcf, 0x51, 0xf7, 0x85, 0x0c, 0x9f, 0x40, 0x92, 0xc5, 0x4c, 0x83, 0xdb, 0xb6, 0xcd, + 0xfe, 0x64, 0x75, 0x47, 0x0e, 0x66, 0x62, 0x3b, 0x9e, 0xef, 0xd5, 0x99, 0x76, 0xe7, 0x2e, 0xe8, + 0x3c, 0x12, 0xff, 0x82, 0x7a, 0xaa, 0xe0, 0x1c, 0x94, 0x3a, 0x2a, 0xe2, 0x03, 0x19, 0xaa, 0xef, + 0x84, 0xd2, 0x32, 0x3f, 0x3b, 0x14, 0x89, 0xd0, 0xee, 0xe6, 0xae, 0x33, 0x6c, 0x07, 0x5e, 0x55, + 0xf6, 0x7b, 0xa3, 0x95, 0x2a, 0xfa, 0x06, 0x02, 0xa6, 0x68, 0xe7, 0x88, 0x89, 0x18, 0xa2, 0x25, + 0xf6, 0x96, 0x65, 0xf7, 0xaa, 0xb2, 0xbf, 0xf3, 0xa8, 0x51, 0x41, 0x57, 0x38, 0x07, 0x7f, 0xae, + 0xa1, 0x5b, 0xaf, 0x7d, 0x39, 0xf8, 0x07, 0xb4, 0xc9, 0xb8, 0x16, 0xa7, 0x66, 0xb2, 0xcc, 0xd0, + 0xde, 0x9d, 0x2f, 0x91, 0xd9, 0x7e, 0xb3, 0x4d, 0x40, 0xe1, 0x08, 0x4c, 0x27, 0x60, 0xf6, 0xb9, + 0x3d, 0xb0, 0x56, 0x5a, 0x23, 0x70, 0x8c, 0xb6, 0x63, 0xa6, 0xf4, 0xe5, 0x50, 0x9a, 0x91, 0xb3, + 0x4d, 0xea, 0xee, 0x7f, 0x7a, 0xbd, 0xcf, 0xcc, 0x38, 0x82, 0xf7, 0xab, 0xb2, 0xbf, 0x7d, 0xb8, + 0xc0, 0xa1, 0x4b, 0x64, 0x9c, 0x23, 0x6c, 0x63, 0xd3, 0x12, 0xda, 0xf7, 0xda, 0xff, 0xfb, 0xbd, + 0x9d, 0xaa, 0xec, 0xe3, 0xc3, 0x25, 0x12, 0x6d, 0xa0, 0x9b, 0x85, 0xf2, 0xee, 0xc2, 0xa8, 0xbc, + 0x85, 0x05, 0x7b, 0xff, 0xb5, 0x05, 0xfb, 0x61, 0xd3, 0x14, 0x93, 0x37, 0xec, 0xd5, 0xe0, 0xfe, + 0xf9, 0x85, 0xd7, 0x7a, 0x79, 0xe1, 0xb5, 0x5e, 0x5d, 0x78, 0xad, 0x5f, 0x2b, 0xcf, 0x39, 0xaf, + 0x3c, 0xe7, 0x65, 0xe5, 0x39, 0xaf, 0x2a, 0xcf, 0xf9, 0xbb, 0xf2, 0x9c, 0xdf, 0xff, 0xf1, 0x5a, + 0x4f, 0xdd, 0x55, 0xff, 0xc7, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x61, 0x72, 0xc3, 0xe0, 0xc3, + 0x07, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -517,49 +487,6 @@ func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *JobTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -700,19 +627,6 @@ func (m *CronJobStatus) Size() (n int) { return n } -func (m *JobTemplate) Size() (n int) { - if m == nil { - return 0 - } - 
var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *JobTemplateSpec) Size() (n int) { if m == nil { return 0 @@ -794,17 +708,6 @@ func (this *CronJobStatus) String() string { }, "") return s } -func (this *JobTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *JobTemplateSpec) String() string { if this == nil { return "nil" @@ -1507,122 +1410,6 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index d8386a8f..ac774f19 100644 --- 
a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -73,7 +73,6 @@ message CronJobSpec { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional optional string timeZone = 8; @@ -84,6 +83,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -127,19 +127,6 @@ message CronJobStatus { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } -// JobTemplate describes a template for creating copies of a predefined pod. -message JobTemplate { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional JobTemplateSpec template = 2; -} - // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go index 226de49f..9382ca23 100644 --- a/vendor/k8s.io/api/batch/v1beta1/register.go +++ b/vendor/k8s.io/api/batch/v1beta1/register.go @@ -44,7 +44,6 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &JobTemplate{}, &CronJob{}, &CronJobList{}, ) diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index 4c0d69dd..976752a9 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -22,24 +22,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.8 -// +k8s:prerelease-lifecycle-gen:deprecated=1.22 - -// JobTemplate describes a template for creating copies of a predefined pod. -type JobTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` -} - // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. 
@@ -113,7 +95,6 @@ type CronJobSpec struct { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional TimeZone *string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` @@ -124,6 +105,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index 5716bbb8..3b3eafe8 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -51,9 +51,9 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones This is beta field and must be enabled via the `CronJobTimeZone` feature gate.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. 
More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", @@ -75,16 +75,6 @@ func (CronJobStatus) SwaggerDoc() map[string]string { return map_CronJobStatus } -var map_JobTemplate = map[string]string{ - "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (JobTemplate) SwaggerDoc() map[string]string { - return map_JobTemplate -} - var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index c3a3494c..2c857033 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -158,33 +158,6 @@ func (in *CronJobStatus) DeepCopy() *CronJobStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. -func (in *JobTemplate) DeepCopy() *JobTemplate { - if in == nil { - return nil - } - out := new(JobTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *JobTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go index 2836b3b0..b57e9f1b 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go @@ -72,21 +72,3 @@ func (in *CronJobList) APILifecycleReplacement() schema.GroupVersionKind { func (in *CronJobList) APILifecycleRemoved() (major, minor int) { return 1, 25 } - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *JobTemplate) APILifecycleIntroduced() (major, minor int) { - return 1, 8 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *JobTemplate) APILifecycleDeprecated() (major, minor int) { - return 1, 22 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *JobTemplate) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go index af5efb51..92b2018e 100644 --- a/vendor/k8s.io/api/certificates/v1/types.go +++ b/vendor/k8s.io/api/certificates/v1/types.go @@ -274,8 +274,9 @@ type CertificateSigningRequestList struct { } // KeyUsage specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// See: // +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 // // +enum diff --git a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go index 0dc8a4c6..4bdf39eb 100644 --- a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ diff --git a/vendor/k8s.io/api/certificates/v1alpha1/doc.go b/vendor/k8s.io/api/certificates/v1alpha1/doc.go new file mode 100644 index 00000000..d83d0e82 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=certificates.k8s.io + +package v1alpha1 // import "k8s.io/api/certificates/v1alpha1" diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go new file mode 100644 index 00000000..546ecbef --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go @@ -0,0 +1,831 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} } +func (*ClusterTrustBundle) ProtoMessage() {} +func (*ClusterTrustBundle) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{0} +} +func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundle) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundle.Merge(m, src) +} +func (m *ClusterTrustBundle) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundle) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundle.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo + +func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} } +func (*ClusterTrustBundleList) ProtoMessage() {} +func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{1} +} +func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleList.Merge(m, src) +} +func (m *ClusterTrustBundleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo + +func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} } +func (*ClusterTrustBundleSpec) ProtoMessage() {} +func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{2} +} +func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleSpec.Merge(m, src) +} +func (m *ClusterTrustBundleSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle") + proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList") + proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1alpha1/generated.proto", fileDescriptor_8915b0d419f9eda6) +} + +var fileDescriptor_8915b0d419f9eda6 = []byte{ + // 448 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6b, 0x13, 0x41, + 0x14, 0xc7, 0x77, 
0x6a, 0x0b, 0xed, 0x44, 0x41, 0x56, 0x90, 0x90, 0xc3, 0x34, 0xe4, 0xd4, 0x8b, + 0x33, 0x26, 0x54, 0xe9, 0x79, 0x05, 0xa1, 0xe0, 0x0f, 0xd8, 0x7a, 0xb1, 0x78, 0x70, 0x32, 0x79, + 0xdd, 0x8c, 0xc9, 0xee, 0x0e, 0x33, 0xb3, 0x01, 0x6f, 0x82, 0xff, 0x80, 0x7f, 0x56, 0x8e, 0xd5, + 0x53, 0x4f, 0xc5, 0xac, 0xff, 0x88, 0xcc, 0x64, 0x93, 0x5d, 0x5c, 0x25, 0xd2, 0xdb, 0xbe, 0x1f, + 0x9f, 0xef, 0x7b, 0xdf, 0xb7, 0x0c, 0x3e, 0x9f, 0x9d, 0x19, 0x2a, 0x73, 0x36, 0x2b, 0xc6, 0xa0, + 0x33, 0xb0, 0x60, 0xd8, 0x02, 0xb2, 0x49, 0xae, 0x59, 0x55, 0xe0, 0x4a, 0x32, 0x01, 0xda, 0xca, + 0x2b, 0x29, 0xb8, 0x2f, 0x0f, 0xf9, 0x5c, 0x4d, 0xf9, 0x90, 0x25, 0x90, 0x81, 0xe6, 0x16, 0x26, + 0x54, 0xe9, 0xdc, 0xe6, 0x61, 0x7f, 0x4d, 0x50, 0xae, 0x24, 0x6d, 0x12, 0x74, 0x43, 0xf4, 0x9e, + 0x24, 0xd2, 0x4e, 0x8b, 0x31, 0x15, 0x79, 0xca, 0x92, 0x3c, 0xc9, 0x99, 0x07, 0xc7, 0xc5, 0x95, + 0x8f, 0x7c, 0xe0, 0xbf, 0xd6, 0x82, 0xbd, 0xd3, 0x7a, 0x85, 0x94, 0x8b, 0xa9, 0xcc, 0x40, 0x7f, + 0x66, 0x6a, 0x96, 0xb8, 0x84, 0x61, 0x29, 0x58, 0xce, 0x16, 0xad, 0x35, 0x7a, 0xec, 0x5f, 0x94, + 0x2e, 0x32, 0x2b, 0x53, 0x68, 0x01, 0xcf, 0x77, 0x01, 0x46, 0x4c, 0x21, 0xe5, 0x7f, 0x72, 0x83, + 0x1f, 0x08, 0x87, 0x2f, 0xe6, 0x85, 0xb1, 0xa0, 0xdf, 0xe9, 0xc2, 0xd8, 0xa8, 0xc8, 0x26, 0x73, + 0x08, 0x3f, 0xe2, 0x43, 0xb7, 0xda, 0x84, 0x5b, 0xde, 0x45, 0x7d, 0x74, 0xd2, 0x19, 0x3d, 0xa5, + 0xf5, 0x65, 0xb6, 0x13, 0xa8, 0x9a, 0x25, 0x2e, 0x61, 0xa8, 0xeb, 0xa6, 0x8b, 0x21, 0x7d, 0x3b, + 0xfe, 0x04, 0xc2, 0xbe, 0x06, 0xcb, 0xa3, 0x70, 0x79, 0x7b, 0x1c, 0x94, 0xb7, 0xc7, 0xb8, 0xce, + 0xc5, 0x5b, 0xd5, 0xf0, 0x12, 0xef, 0x1b, 0x05, 0xa2, 0xbb, 0xe7, 0xd5, 0xcf, 0xe8, 0xae, 0xbb, + 0xd3, 0xf6, 0x96, 0x17, 0x0a, 0x44, 0x74, 0xbf, 0x9a, 0xb2, 0xef, 0xa2, 0xd8, 0x6b, 0x0e, 0xbe, + 0x23, 0xfc, 0xb8, 0xdd, 0xfe, 0x4a, 0x1a, 0x1b, 0x7e, 0x68, 0x19, 0xa3, 0xff, 0x67, 0xcc, 0xd1, + 0xde, 0xd6, 0xc3, 0x6a, 0xe0, 0xe1, 0x26, 0xd3, 0x30, 0xf5, 0x1e, 0x1f, 0x48, 0x0b, 0xa9, 0xe9, + 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0xa7, 0x77, 0x71, 0x15, 0x3d, 0xa8, 0x06, 0x1c, 0x9c, 0x3b, + 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0x57, 0x4f, 0xce, 0x74, 0x38, 0xc2, 0xd8, 0xc8, 0x24, 0x03, + 0xfd, 0x86, 0xa7, 0xe0, 0x5d, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0x56, 0xe2, 0x46, 0x57, 0xf8, 0x0c, + 0x77, 0x6c, 0x2d, 0xe3, 0xff, 0xc2, 0x51, 0xf4, 0xa8, 0x82, 0x3a, 0x8d, 0x09, 0x71, 0xb3, 0x2f, + 0x7a, 0xb9, 0x5c, 0x91, 0xe0, 0x7a, 0x45, 0x82, 0x9b, 0x15, 0x09, 0xbe, 0x94, 0x04, 0x2d, 0x4b, + 0x82, 0xae, 0x4b, 0x82, 0x6e, 0x4a, 0x82, 0x7e, 0x96, 0x04, 0x7d, 0xfb, 0x45, 0x82, 0xcb, 0xfe, + 0xae, 0x67, 0xf7, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x05, 0xe9, 0xaa, 0x07, 0xb2, 0x03, 0x00, 0x00, +} + +func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*ClusterTrustBundleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterTrustBundleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TrustBundle) + copy(dAtA[i:], m.TrustBundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TrustBundle))) + i-- + dAtA[i] = 0x12 + i -= len(m.SignerName) + copy(dAtA[i:], m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterTrustBundle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterTrustBundleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterTrustBundleSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TrustBundle) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterTrustBundle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundle{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleList) String() string { + if this == nil { + 
return "nil" + } + repeatedStringForItems := "[]ClusterTrustBundle{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterTrustBundle", "ClusterTrustBundle", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterTrustBundleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundleSpec{`, + `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `TrustBundle:` + fmt.Sprintf("%v", this.TrustBundle) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterTrustBundle{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustBundle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto new file mode 100644 index 00000000..b0ebc4bd --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto @@ -0,0 +1,103 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.certificates.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/certificates/v1alpha1"; + +// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors +// (root certificates). +// +// ClusterTrustBundle objects are considered to be readable by any authenticated +// user in the cluster, because they can be mounted by pods using the +// `clusterTrustBundle` projection. All service accounts have read access to +// ClusterTrustBundles by default. Users who only have namespace-level access +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount +// that they have access to. +// +// It can be optionally associated with a particular assigner, in which case it +// contains one valid set of trust anchors for that signer. Signers may have +// multiple associated ClusterTrustBundles; each is an independent set of trust +// anchors for that signer. Admission control is used to enforce that only users +// with permissions on the signer can create or modify the corresponding bundle. +message ClusterTrustBundle { + // metadata contains the object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the signer (if any) and trust anchors. + optional ClusterTrustBundleSpec spec = 2; +} + +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects +message ClusterTrustBundleList { + // metadata contains the list metadata. + // + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a collection of ClusterTrustBundle objects + repeated ClusterTrustBundle items = 2; +} + +// ClusterTrustBundleSpec contains the signer and trust anchors. +message ClusterTrustBundleSpec { + // signerName indicates the associated signer, if any. + // + // In order to create or update a ClusterTrustBundle that sets signerName, + // you must have the following cluster-scoped permission: + // group=certificates.k8s.io resource=signers resourceName= + // verb=attest. + // + // If signerName is not empty, then the ClusterTrustBundle object must be + // named with the signer name as a prefix (translating slashes to colons). + // For example, for the signer name `example.com/foo`, valid + // ClusterTrustBundle object names include `example.com:foo:abc` and + // `example.com:foo:v1`. + // + // If signerName is empty, then the ClusterTrustBundle object's name must + // not have such a prefix. + // + // List/watch requests for ClusterTrustBundles can filter on this field + // using a `spec.signerName=NAME` field selector. + // + // +optional + optional string signerName = 1; + + // trustBundle contains the individual X.509 trust anchors for this + // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. 
+ // + // The data must consist only of PEM certificate blocks that parse as valid + // X.509 certificates. Each certificate must include a basic constraints + // extension with the CA bit set. The API server will reject objects that + // contain duplicate certificates, or that use PEM block headers. + // + // Users of ClusterTrustBundles, including Kubelet, are free to reorder and + // deduplicate certificate blocks in this file according to their own logic, + // as well as to drop PEM block headers and inter-block data. + optional string trustBundle = 2; +} + diff --git a/vendor/k8s.io/api/certificates/v1alpha1/register.go b/vendor/k8s.io/api/certificates/v1alpha1/register.go new file mode 100644 index 00000000..7288ed9a --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + localSchemeBuilder = &SchemeBuilder + + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterTrustBundle{}, + &ClusterTrustBundleList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types.go b/vendor/k8s.io/api/certificates/v1alpha1/types.go new file mode 100644 index 00000000..1a9fda01 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/types.go @@ -0,0 +1,106 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors +// (root certificates). +// +// ClusterTrustBundle objects are considered to be readable by any authenticated +// user in the cluster, because they can be mounted by pods using the +// `clusterTrustBundle` projection. All service accounts have read access to +// ClusterTrustBundles by default. Users who only have namespace-level access +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount +// that they have access to. +// +// It can be optionally associated with a particular assigner, in which case it +// contains one valid set of trust anchors for that signer. Signers may have +// multiple associated ClusterTrustBundles; each is an independent set of trust +// anchors for that signer. Admission control is used to enforce that only users +// with permissions on the signer can create or modify the corresponding bundle. +type ClusterTrustBundle struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the signer (if any) and trust anchors. + Spec ClusterTrustBundleSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// ClusterTrustBundleSpec contains the signer and trust anchors. +type ClusterTrustBundleSpec struct { + // signerName indicates the associated signer, if any. + // + // In order to create or update a ClusterTrustBundle that sets signerName, + // you must have the following cluster-scoped permission: + // group=certificates.k8s.io resource=signers resourceName= + // verb=attest. + // + // If signerName is not empty, then the ClusterTrustBundle object must be + // named with the signer name as a prefix (translating slashes to colons). + // For example, for the signer name `example.com/foo`, valid + // ClusterTrustBundle object names include `example.com:foo:abc` and + // `example.com:foo:v1`. + // + // If signerName is empty, then the ClusterTrustBundle object's name must + // not have such a prefix. + // + // List/watch requests for ClusterTrustBundles can filter on this field + // using a `spec.signerName=NAME` field selector. + // + // +optional + SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,opt,name=signerName"` + + // trustBundle contains the individual X.509 trust anchors for this + // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. + // + // The data must consist only of PEM certificate blocks that parse as valid + // X.509 certificates. Each certificate must include a basic constraints + // extension with the CA bit set. The API server will reject objects that + // contain duplicate certificates, or that use PEM block headers. 
+ // + // Users of ClusterTrustBundles, including Kubelet, are free to reorder and + // deduplicate certificate blocks in this file according to their own logic, + // as well as to drop PEM block headers and inter-block data. + TrustBundle string `json:"trustBundle" protobuf:"bytes,2,opt,name=trustBundle"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects +type ClusterTrustBundleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the list metadata. + // + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a collection of ClusterTrustBundle objects + Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..bff649e3 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,60 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ClusterTrustBundle = map[string]string{ + "": "ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\n\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\n\nIt can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. 
Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.", + "metadata": "metadata contains the object metadata.", + "spec": "spec contains the signer (if any) and trust anchors.", +} + +func (ClusterTrustBundle) SwaggerDoc() map[string]string { + return map_ClusterTrustBundle +} + +var map_ClusterTrustBundleList = map[string]string{ + "": "ClusterTrustBundleList is a collection of ClusterTrustBundle objects", + "metadata": "metadata contains the list metadata.", + "items": "items is a collection of ClusterTrustBundle objects", +} + +func (ClusterTrustBundleList) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleList +} + +var map_ClusterTrustBundleSpec = map[string]string{ + "": "ClusterTrustBundleSpec contains the signer and trust anchors.", + "signerName": "signerName indicates the associated signer, if any.\n\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName= verb=attest.\n\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\n\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\n\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.", + "trustBundle": "trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\n\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\n\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.", +} + +func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..30a4dc1e --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,102 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
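As a rough, hypothetical illustration (not part of the vendored patch) of the signerName naming rule quoted in the new v1alpha1 docs above: for the example signer `example.com/foo`, a ClusterTrustBundle could be constructed as below. The PEM payload is a labeled placeholder, and only fields shown in the added types.go are used.

package main

import (
	"fmt"

	certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleBundle shows the documented prefix rule: for signerName
// "example.com/foo" the object name carries the signer name with slashes
// translated to colons, e.g. "example.com:foo:abc".
func exampleBundle(caPEM string) *certificatesv1alpha1.ClusterTrustBundle {
	return &certificatesv1alpha1.ClusterTrustBundle{
		ObjectMeta: metav1.ObjectMeta{Name: "example.com:foo:abc"},
		Spec: certificatesv1alpha1.ClusterTrustBundleSpec{
			SignerName: "example.com/foo",
			// trustBundle must be a PEM bundle of CA certificates (CA bit set,
			// no duplicates, no PEM block headers), per the spec comments above.
			TrustBundle: caPEM,
		},
	}
}

func main() {
	// Placeholder PEM, for illustration only.
	fmt.Println(exampleBundle("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n").Name)
}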
+ +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundle) DeepCopyInto(out *ClusterTrustBundle) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundle. +func (in *ClusterTrustBundle) DeepCopy() *ClusterTrustBundle { + if in == nil { + return nil + } + out := new(ClusterTrustBundle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundle) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleList) DeepCopyInto(out *ClusterTrustBundleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterTrustBundle, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleList. +func (in *ClusterTrustBundleList) DeepCopy() *ClusterTrustBundleList { + if in == nil { + return nil + } + out := new(ClusterTrustBundleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleSpec) DeepCopyInto(out *ClusterTrustBundleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleSpec. +func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec { + if in == nil { + return nil + } + out := new(ClusterTrustBundleSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 00000000..dfafa656 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. 
+ +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index e246fba0..f70f01ef 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -124,8 +124,10 @@ message CertificateSigningRequestSpec { // allowedUsages specifies a set of usage contexts the key will be // valid for. - // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go index fe7aab97..7e5a5c19 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -89,8 +89,10 @@ type CertificateSigningRequestSpec struct { // allowedUsages specifies a set of usage contexts the key will be // valid for. 
- // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", @@ -229,8 +231,9 @@ type CertificateSigningRequestList struct { } // KeyUsages specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// See: // +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 type KeyUsage string diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go index d3f31815..f9ab1f13 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ @@ -55,7 +55,7 @@ var map_CertificateSigningRequestSpec = map[string]string{ "request": "Base64-encoded PKCS#10 CSR data", "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.", - "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See:\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.3\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.12\n\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", "username": "Information about the requesting user. See user.Info interface for details.", "uid": "UID information about the requesting user. See user.Info interface for details.", "groups": "Group information about the requesting user. See user.Info interface for details.", diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index b1efb737..36fce60f 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -34,7 +34,7 @@ message Lease { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -47,7 +47,7 @@ message LeaseList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } @@ -59,7 +59,7 @@ message LeaseSpec { // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go index 7a5605ac..b0e1d068 100644 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -30,7 +30,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -43,7 +43,7 @@ type LeaseSpec struct { HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -69,6 +69,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index 0f144043..f3720eca 100644 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -50,7 +50,7 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 85faa3b0..92c8918b 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -34,7 +34,7 @@ message Lease { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -47,7 +47,7 @@ message LeaseList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } @@ -59,7 +59,7 @@ message LeaseSpec { // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 8f300fca..3a3d5f32 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -33,7 +33,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -46,7 +46,7 @@ type LeaseSpec struct { HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -75,6 +75,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index f557d265..78ca4e39 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
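As an illustrative aside (not part of the vendored files): the coordination Lease doc comments above describe leader-election semantics, where a candidate may force-acquire the lease only after leaseDurationSeconds have elapsed since the last observed renewTime. A minimal sketch of constructing such a Lease; the object name, namespace, and holder identity are made up.

package main

import (
	"fmt"
	"time"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	holder := "controller-pod-abc123"         // hypothetical holder identity
	leaseDuration := int32(15)                // seconds a candidate waits past renewTime
	now := metav1.NewMicroTime(time.Now())

	lease := coordinationv1.Lease{
		ObjectMeta: metav1.ObjectMeta{Name: "my-controller", Namespace: "kube-system"},
		Spec: coordinationv1.LeaseSpec{
			HolderIdentity:       &holder,
			LeaseDurationSeconds: &leaseDuration,
			AcquireTime:          &now,
			RenewTime:            &now,
		},
	}

	// The lease is considered expired once leaseDurationSeconds have passed
	// since the last observed renewTime, per the doc comments above.
	expiry := lease.Spec.RenewTime.Add(time.Duration(*lease.Spec.LeaseDurationSeconds) * time.Second)
	fmt.Println("lease held by", *lease.Spec.HolderIdentity, "until", expiry)
}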
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -50,7 +50,7 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index eb9517e1..61f86f85 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file should be consistent with pkg/api/annotation_key_constants.go. +// This file should be consistent with pkg/apis/core/annotation_key_constants.go. package v1 @@ -144,8 +144,19 @@ const ( // This annotation is beta-level and is only honored when PodDeletionCost feature is enabled. PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost" - // AnnotationTopologyAwareHints can be used to enable or disable Topology - // Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any - // other value is treated as "Disabled". - AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" + // DeprecatedAnnotationTopologyAwareHints can be used to enable or disable + // Topology Aware Hints for a Service. This may be set to "Auto" or + // "Disabled". Any other value is treated as "Disabled". This annotation has + // been deprecated in favor of the "service.kubernetes.io/topology-mode" + // annotation. 
+ DeprecatedAnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" + + // AnnotationTopologyMode can be used to enable or disable Topology Aware + // Routing for a Service. Well known values are "Auto" and "Disabled". + // Implementations may choose to develop new topology approaches, exposing + // them with domain-prefixed values. For example, "example.com/lowest-rtt" + // could be a valid implementation-specific value for this annotation. These + // heuristics will often populate topology hints on EndpointSlices, but that + // is not a requirement. + AnnotationTopologyMode = "service.kubernetes.io/topology-mode" ) diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index a8df2b22..c7664629 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -889,10 +889,38 @@ func (m *ContainerPort) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerPort proto.InternalMessageInfo +func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } +func (*ContainerResizePolicy) ProtoMessage() {} +func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{30} +} +func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResizePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResizePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResizePolicy.Merge(m, src) +} +func (m *ContainerResizePolicy) XXX_Size() int { + return m.Size() +} +func (m *ContainerResizePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResizePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo + func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{30} + return fileDescriptor_83c10c24ec417dc9, []int{31} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +948,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{31} + return fileDescriptor_83c10c24ec417dc9, []int{32} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +976,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{32} + return fileDescriptor_83c10c24ec417dc9, []int{33} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1004,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{33} + return 
fileDescriptor_83c10c24ec417dc9, []int{34} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1032,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{34} + return fileDescriptor_83c10c24ec417dc9, []int{35} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1060,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{35} + return fileDescriptor_83c10c24ec417dc9, []int{36} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1088,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{36} + return fileDescriptor_83c10c24ec417dc9, []int{37} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1116,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{37} + return fileDescriptor_83c10c24ec417dc9, []int{38} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1144,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{38} + return fileDescriptor_83c10c24ec417dc9, []int{39} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1172,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{39} + return fileDescriptor_83c10c24ec417dc9, []int{40} } func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1200,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{40} + return fileDescriptor_83c10c24ec417dc9, []int{41} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1228,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{41} + return fileDescriptor_83c10c24ec417dc9, []int{42} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1256,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{42} + return fileDescriptor_83c10c24ec417dc9, []int{43} } func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1284,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{43} + return fileDescriptor_83c10c24ec417dc9, []int{44} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1312,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{44} + return fileDescriptor_83c10c24ec417dc9, []int{45} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1340,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{45} + return fileDescriptor_83c10c24ec417dc9, []int{46} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1368,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{46} + return fileDescriptor_83c10c24ec417dc9, []int{47} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1396,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{47} + return fileDescriptor_83c10c24ec417dc9, []int{48} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1424,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{48} + return fileDescriptor_83c10c24ec417dc9, []int{49} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1452,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} + return fileDescriptor_83c10c24ec417dc9, []int{50} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1480,7 @@ var xxx_messageInfo_EphemeralContainerCommon 
proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} + return fileDescriptor_83c10c24ec417dc9, []int{51} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1508,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} + return fileDescriptor_83c10c24ec417dc9, []int{52} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1536,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} + return fileDescriptor_83c10c24ec417dc9, []int{53} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1564,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} + return fileDescriptor_83c10c24ec417dc9, []int{54} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1592,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} + return fileDescriptor_83c10c24ec417dc9, []int{55} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1620,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} + return fileDescriptor_83c10c24ec417dc9, []int{56} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1648,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} + return fileDescriptor_83c10c24ec417dc9, []int{57} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1676,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} + return fileDescriptor_83c10c24ec417dc9, []int{58} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1704,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{58} + return fileDescriptor_83c10c24ec417dc9, []int{59} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1732,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{59} + return fileDescriptor_83c10c24ec417dc9, []int{60} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1760,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_83c10c24ec417dc9, []int{61} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1788,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo func (m *GRPCAction) Reset() { *m = GRPCAction{} } func (*GRPCAction) ProtoMessage() {} func (*GRPCAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} + return fileDescriptor_83c10c24ec417dc9, []int{62} } func (m *GRPCAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1816,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} + return fileDescriptor_83c10c24ec417dc9, []int{63} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1844,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} + return fileDescriptor_83c10c24ec417dc9, []int{64} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1872,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} + return fileDescriptor_83c10c24ec417dc9, []int{65} } func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1900,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} + return fileDescriptor_83c10c24ec417dc9, []int{66} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +1928,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) 
ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} + return fileDescriptor_83c10c24ec417dc9, []int{67} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1928,7 +1956,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} + return fileDescriptor_83c10c24ec417dc9, []int{68} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +1984,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} + return fileDescriptor_83c10c24ec417dc9, []int{69} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +2012,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{69} + return fileDescriptor_83c10c24ec417dc9, []int{70} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +2040,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} + return fileDescriptor_83c10c24ec417dc9, []int{71} } func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2068,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} + return fileDescriptor_83c10c24ec417dc9, []int{72} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2096,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_83c10c24ec417dc9, []int{73} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2124,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} + return fileDescriptor_83c10c24ec417dc9, []int{74} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,7 +2152,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} + return 
fileDescriptor_83c10c24ec417dc9, []int{75} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2180,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} + return fileDescriptor_83c10c24ec417dc9, []int{76} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2208,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} + return fileDescriptor_83c10c24ec417dc9, []int{77} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2236,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} + return fileDescriptor_83c10c24ec417dc9, []int{78} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2264,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} + return fileDescriptor_83c10c24ec417dc9, []int{79} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2292,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{79} + return fileDescriptor_83c10c24ec417dc9, []int{80} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2320,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{80} + return fileDescriptor_83c10c24ec417dc9, []int{81} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2348,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{81} + return fileDescriptor_83c10c24ec417dc9, []int{82} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2348,7 +2376,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{82} + return fileDescriptor_83c10c24ec417dc9, []int{83} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 
+2404,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{83} + return fileDescriptor_83c10c24ec417dc9, []int{84} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2432,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{84} + return fileDescriptor_83c10c24ec417dc9, []int{85} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2460,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} + return fileDescriptor_83c10c24ec417dc9, []int{86} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2488,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{86} + return fileDescriptor_83c10c24ec417dc9, []int{87} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2516,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{87} + return fileDescriptor_83c10c24ec417dc9, []int{88} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2544,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{88} + return fileDescriptor_83c10c24ec417dc9, []int{89} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2572,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{89} + return fileDescriptor_83c10c24ec417dc9, []int{90} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2600,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{90} + return fileDescriptor_83c10c24ec417dc9, []int{91} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2628,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{91} + return fileDescriptor_83c10c24ec417dc9, []int{92} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2656,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{92} + return fileDescriptor_83c10c24ec417dc9, []int{93} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2684,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{93} + return fileDescriptor_83c10c24ec417dc9, []int{94} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2712,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} + return fileDescriptor_83c10c24ec417dc9, []int{95} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2740,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{95} + return fileDescriptor_83c10c24ec417dc9, []int{96} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2768,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} + return fileDescriptor_83c10c24ec417dc9, []int{97} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2796,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} + return fileDescriptor_83c10c24ec417dc9, []int{98} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2824,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} + return fileDescriptor_83c10c24ec417dc9, []int{99} } func (m *NodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2852,7 @@ var xxx_messageInfo_NodeResources proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} + return fileDescriptor_83c10c24ec417dc9, []int{100} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2880,7 @@ var 
xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} + return fileDescriptor_83c10c24ec417dc9, []int{101} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +2908,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} + return fileDescriptor_83c10c24ec417dc9, []int{102} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +2936,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} + return fileDescriptor_83c10c24ec417dc9, []int{103} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +2964,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} + return fileDescriptor_83c10c24ec417dc9, []int{104} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +2992,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} + return fileDescriptor_83c10c24ec417dc9, []int{105} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3020,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} + return fileDescriptor_83c10c24ec417dc9, []int{106} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3048,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} + return fileDescriptor_83c10c24ec417dc9, []int{107} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3076,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} + return fileDescriptor_83c10c24ec417dc9, []int{108} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3104,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = 
PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} + return fileDescriptor_83c10c24ec417dc9, []int{109} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3132,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} + return fileDescriptor_83c10c24ec417dc9, []int{110} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3160,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{110} + return fileDescriptor_83c10c24ec417dc9, []int{111} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3188,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} + return fileDescriptor_83c10c24ec417dc9, []int{112} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3216,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} + return fileDescriptor_83c10c24ec417dc9, []int{113} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3244,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{113} + return fileDescriptor_83c10c24ec417dc9, []int{114} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3272,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{114} + return fileDescriptor_83c10c24ec417dc9, []int{115} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3300,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func 
(*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{115} + return fileDescriptor_83c10c24ec417dc9, []int{116} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3328,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{116} + return fileDescriptor_83c10c24ec417dc9, []int{117} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{117} + return fileDescriptor_83c10c24ec417dc9, []int{118} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{118} + return fileDescriptor_83c10c24ec417dc9, []int{119} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{119} + return fileDescriptor_83c10c24ec417dc9, []int{120} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3440,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{120} + return fileDescriptor_83c10c24ec417dc9, []int{121} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3468,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{121} + return fileDescriptor_83c10c24ec417dc9, []int{122} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3496,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{122} + return fileDescriptor_83c10c24ec417dc9, []int{123} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3524,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = 
PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{123} + return fileDescriptor_83c10c24ec417dc9, []int{124} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3552,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{124} + return fileDescriptor_83c10c24ec417dc9, []int{125} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3580,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{125} + return fileDescriptor_83c10c24ec417dc9, []int{126} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3608,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{126} + return fileDescriptor_83c10c24ec417dc9, []int{127} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3636,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{127} + return fileDescriptor_83c10c24ec417dc9, []int{128} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3664,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{128} + return fileDescriptor_83c10c24ec417dc9, []int{129} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3692,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{129} + return fileDescriptor_83c10c24ec417dc9, []int{130} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3720,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{130} + return fileDescriptor_83c10c24ec417dc9, []int{131} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3748,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_83c10c24ec417dc9, []int{132} } func (m *PodLogOptions) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3776,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{132} + return fileDescriptor_83c10c24ec417dc9, []int{133} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3804,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{133} + return fileDescriptor_83c10c24ec417dc9, []int{134} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3832,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{134} + return fileDescriptor_83c10c24ec417dc9, []int{135} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3860,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{135} + return fileDescriptor_83c10c24ec417dc9, []int{136} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +3888,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } func (*PodResourceClaim) ProtoMessage() {} func (*PodResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{136} + return fileDescriptor_83c10c24ec417dc9, []int{137} } func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +3916,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } func (*PodSchedulingGate) ProtoMessage() {} func (*PodSchedulingGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{137} + return fileDescriptor_83c10c24ec417dc9, []int{138} } func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +3944,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{138} + return fileDescriptor_83c10c24ec417dc9, []int{139} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +3972,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{139} + return fileDescriptor_83c10c24ec417dc9, []int{140} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4000,7 @@ var xxx_messageInfo_PodSignature 
proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{140} + return fileDescriptor_83c10c24ec417dc9, []int{141} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4028,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{141} + return fileDescriptor_83c10c24ec417dc9, []int{142} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4056,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4084,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4112,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4140,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4168,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4196,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4224,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{148} + return fileDescriptor_83c10c24ec417dc9, []int{149} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4252,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4280,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4308,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4336,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4364,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4392,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4420,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4448,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4476,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4504,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4532,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4560,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4588,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4616,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, []int{163} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4644,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4672,7 @@ var xxx_messageInfo_ResourceClaim 
proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4700,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{166} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4728,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4756,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4784,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4812,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4840,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_83c10c24ec417dc9, []int{171} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4868,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-4868,7 +4896,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4924,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +4952,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +4980,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5008,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5036,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5064,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5092,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5120,7 @@ var xxx_messageInfo_SecretList 
proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5148,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5176,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5204,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5232,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5260,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5288,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5316,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5344,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = 
ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5372,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5400,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5428,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_83c10c24ec417dc9, []int{192} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5456,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_83c10c24ec417dc9, []int{193} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5484,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_83c10c24ec417dc9, []int{194} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5512,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_83c10c24ec417dc9, []int{195} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5540,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_83c10c24ec417dc9, []int{196} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5568,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m 
*StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_83c10c24ec417dc9, []int{197} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5596,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_83c10c24ec417dc9, []int{198} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5624,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_83c10c24ec417dc9, []int{199} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5652,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_83c10c24ec417dc9, []int{200} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5680,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return fileDescriptor_83c10c24ec417dc9, []int{201} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5708,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_83c10c24ec417dc9, []int{202} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5736,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_83c10c24ec417dc9, []int{203} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5764,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_83c10c24ec417dc9, []int{204} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5792,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() 
{ *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_83c10c24ec417dc9, []int{205} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5820,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } func (*TypedObjectReference) ProtoMessage() {} func (*TypedObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{206} } func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5848,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +5876,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{207} + return fileDescriptor_83c10c24ec417dc9, []int{208} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +5904,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{208} + return fileDescriptor_83c10c24ec417dc9, []int{209} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +5932,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{209} + return fileDescriptor_83c10c24ec417dc9, []int{210} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,7 +5960,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{210} + return fileDescriptor_83c10c24ec417dc9, []int{211} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5960,7 +5988,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{211} + return fileDescriptor_83c10c24ec417dc9, []int{212} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5988,7 +6016,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func 
(*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{212} + return fileDescriptor_83c10c24ec417dc9, []int{213} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6016,7 +6044,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{213} + return fileDescriptor_83c10c24ec417dc9, []int{214} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6044,7 +6072,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{214} + return fileDescriptor_83c10c24ec417dc9, []int{215} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6104,11 +6132,13 @@ func init() { proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy") proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry") proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") @@ -6320,917 +6350,925 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 14547 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x69, 0x8c, 0x24, 0xd7, - 0x79, 0x98, 0xaa, 0x7b, 0xae, 0xfe, 0xe6, 0x7e, 0xb3, 0xbb, 0x9c, 0x1d, 0x72, 0x77, 0x96, 0x45, - 0x72, 0xb9, 0x14, 0xc9, 0x19, 0x2d, 0x0f, 0x89, 0x26, 0x25, 0x5a, 0x73, 0xee, 0x36, 0x77, 0x67, - 0xb6, 0xf9, 0x7a, 0x76, 0x57, 0x07, 0x25, 0xa8, 0xa6, 0xfb, 0xcd, 0x4c, 0x69, 0xba, 0xab, 0x9a, - 0x55, 0xd5, 0xb3, 0x3b, 0x8c, 0x84, 0x38, 0xf2, 0x29, 0xdb, 0x09, 0x84, 0xc0, 0x39, 0x20, 0x1b, - 0x46, 0xe0, 0x38, 0xb6, 0x15, 0xe5, 0x52, 0xe4, 0xd8, 0x8e, 0xe5, 0xd8, 0xce, 0xed, 0x04, 0x81, - 0xe3, 0x18, 0x88, 0x65, 0xc0, 0xc8, 0xc4, 0x5e, 0x07, 0x30, 0x04, 0x24, 0xb6, 0x73, 0x01, 0xc9, - 0xc4, 0x89, 0x83, 0x77, 0xd6, 0x7b, 0x75, 0x74, 0xf7, 0x2c, 0x67, 0x47, 0x94, 0xc0, 0x7f, 0xdd, - 0xdf, 0xf7, 0xbd, 0xef, 0xbd, 0x7a, 0xe7, 0xf7, 0xbe, 0xef, 0x7b, 0xdf, 0x07, 0xaf, 0xec, 0xbe, - 0x14, 0xce, 0xb9, 0xfe, 0xfc, 0x6e, 0x7b, 0x93, 0x04, 0x1e, 0x89, 0x48, 0x38, 0xbf, 
0x47, 0xbc, - 0xba, 0x1f, 0xcc, 0x0b, 0x84, 0xd3, 0x72, 0xe7, 0x6b, 0x7e, 0x40, 0xe6, 0xf7, 0x2e, 0xcf, 0x6f, - 0x13, 0x8f, 0x04, 0x4e, 0x44, 0xea, 0x73, 0xad, 0xc0, 0x8f, 0x7c, 0x84, 0x38, 0xcd, 0x9c, 0xd3, - 0x72, 0xe7, 0x28, 0xcd, 0xdc, 0xde, 0xe5, 0x99, 0x67, 0xb7, 0xdd, 0x68, 0xa7, 0xbd, 0x39, 0x57, - 0xf3, 0x9b, 0xf3, 0xdb, 0xfe, 0xb6, 0x3f, 0xcf, 0x48, 0x37, 0xdb, 0x5b, 0xec, 0x1f, 0xfb, 0xc3, - 0x7e, 0x71, 0x16, 0x33, 0x2f, 0xc4, 0xd5, 0x34, 0x9d, 0xda, 0x8e, 0xeb, 0x91, 0x60, 0x7f, 0xbe, - 0xb5, 0xbb, 0xcd, 0xea, 0x0d, 0x48, 0xe8, 0xb7, 0x83, 0x1a, 0x49, 0x56, 0xdc, 0xb1, 0x54, 0x38, - 0xdf, 0x24, 0x91, 0x93, 0xd1, 0xdc, 0x99, 0xf9, 0xbc, 0x52, 0x41, 0xdb, 0x8b, 0xdc, 0x66, 0xba, - 0x9a, 0xf7, 0x77, 0x2b, 0x10, 0xd6, 0x76, 0x48, 0xd3, 0x49, 0x95, 0x7b, 0x3e, 0xaf, 0x5c, 0x3b, - 0x72, 0x1b, 0xf3, 0xae, 0x17, 0x85, 0x51, 0x90, 0x2c, 0x64, 0x7f, 0xdd, 0x82, 0x0b, 0x0b, 0xb7, - 0xab, 0x2b, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x62, 0xc3, 0xaf, 0xed, 0x56, 0x23, 0x3f, 0x20, 0xb7, - 0xfc, 0x46, 0xbb, 0x49, 0xaa, 0xac, 0x23, 0xd0, 0x33, 0x30, 0xb4, 0xc7, 0xfe, 0x97, 0x97, 0xa7, - 0xad, 0x0b, 0xd6, 0xa5, 0xd2, 0xe2, 0xc4, 0xaf, 0x1d, 0xcc, 0xbe, 0xe7, 0xde, 0xc1, 0xec, 0xd0, - 0x2d, 0x01, 0xc7, 0x8a, 0x02, 0x5d, 0x84, 0x81, 0xad, 0x70, 0x63, 0xbf, 0x45, 0xa6, 0x0b, 0x8c, - 0x76, 0x4c, 0xd0, 0x0e, 0xac, 0x56, 0x29, 0x14, 0x0b, 0x2c, 0x9a, 0x87, 0x52, 0xcb, 0x09, 0x22, - 0x37, 0x72, 0x7d, 0x6f, 0xba, 0x78, 0xc1, 0xba, 0xd4, 0xbf, 0x38, 0x29, 0x48, 0x4b, 0x15, 0x89, - 0xc0, 0x31, 0x0d, 0x6d, 0x46, 0x40, 0x9c, 0xfa, 0x0d, 0xaf, 0xb1, 0x3f, 0xdd, 0x77, 0xc1, 0xba, - 0x34, 0x14, 0x37, 0x03, 0x0b, 0x38, 0x56, 0x14, 0xf6, 0x17, 0x0b, 0x30, 0xb4, 0xb0, 0xb5, 0xe5, - 0x7a, 0x6e, 0xb4, 0x8f, 0x6e, 0xc1, 0x88, 0xe7, 0xd7, 0x89, 0xfc, 0xcf, 0xbe, 0x62, 0xf8, 0xb9, - 0x0b, 0x73, 0xe9, 0xa9, 0x34, 0xb7, 0xae, 0xd1, 0x2d, 0x4e, 0xdc, 0x3b, 0x98, 0x1d, 0xd1, 0x21, - 0xd8, 0xe0, 0x83, 0x30, 0x0c, 0xb7, 0xfc, 0xba, 0x62, 0x5b, 0x60, 0x6c, 0x67, 0xb3, 0xd8, 0x56, - 0x62, 0xb2, 0xc5, 0xf1, 0x7b, 0x07, 0xb3, 0xc3, 0x1a, 0x00, 0xeb, 0x4c, 0xd0, 0x26, 0x8c, 0xd3, - 0xbf, 0x5e, 0xe4, 0x2a, 0xbe, 0x45, 0xc6, 0xf7, 0xb1, 0x3c, 0xbe, 0x1a, 0xe9, 0xe2, 0xd4, 0xbd, - 0x83, 0xd9, 0xf1, 0x04, 0x10, 0x27, 0x19, 0xda, 0x6f, 0xc1, 0xd8, 0x42, 0x14, 0x39, 0xb5, 0x1d, - 0x52, 0xe7, 0x23, 0x88, 0x5e, 0x80, 0x3e, 0xcf, 0x69, 0x12, 0x31, 0xbe, 0x17, 0x44, 0xc7, 0xf6, - 0xad, 0x3b, 0x4d, 0x72, 0x78, 0x30, 0x3b, 0x71, 0xd3, 0x73, 0xdf, 0x6c, 0x8b, 0x59, 0x41, 0x61, - 0x98, 0x51, 0xa3, 0xe7, 0x00, 0xea, 0x64, 0xcf, 0xad, 0x91, 0x8a, 0x13, 0xed, 0x88, 0xf1, 0x46, - 0xa2, 0x2c, 0x2c, 0x2b, 0x0c, 0xd6, 0xa8, 0xec, 0xbb, 0x50, 0x5a, 0xd8, 0xf3, 0xdd, 0x7a, 0xc5, - 0xaf, 0x87, 0x68, 0x17, 0xc6, 0x5b, 0x01, 0xd9, 0x22, 0x81, 0x02, 0x4d, 0x5b, 0x17, 0x8a, 0x97, - 0x86, 0x9f, 0xbb, 0x94, 0xf9, 0xb1, 0x26, 0xe9, 0x8a, 0x17, 0x05, 0xfb, 0x8b, 0x0f, 0x89, 0xfa, - 0xc6, 0x13, 0x58, 0x9c, 0xe4, 0x6c, 0xff, 0xb3, 0x02, 0x9c, 0x5e, 0x78, 0xab, 0x1d, 0x90, 0x65, - 0x37, 0xdc, 0x4d, 0xce, 0xf0, 0xba, 0x1b, 0xee, 0xae, 0xc7, 0x3d, 0xa0, 0xa6, 0xd6, 0xb2, 0x80, - 0x63, 0x45, 0x81, 0x9e, 0x85, 0x41, 0xfa, 0xfb, 0x26, 0x2e, 0x8b, 0x4f, 0x9e, 0x12, 0xc4, 0xc3, - 0xcb, 0x4e, 0xe4, 0x2c, 0x73, 0x14, 0x96, 0x34, 0x68, 0x0d, 0x86, 0x6b, 0x6c, 0x41, 0x6e, 0xaf, - 0xf9, 0x75, 0xc2, 0x06, 0xb3, 0xb4, 0xf8, 0x34, 0x25, 0x5f, 0x8a, 0xc1, 0x87, 0x07, 0xb3, 0xd3, - 0xbc, 0x6d, 0x82, 0x85, 0x86, 0xc3, 0x7a, 0x79, 0x64, 0xab, 0xf5, 0xd5, 0xc7, 0x38, 0x41, 0xc6, - 0xda, 0xba, 0xa4, 0x2d, 0x95, 0x7e, 0xb6, 0x54, 0x46, 0xb2, 0x97, 0x09, 0xba, 0x0c, 0x7d, 0xbb, - 0xae, 0x57, 
0x9f, 0x1e, 0x60, 0xbc, 0xce, 0xd1, 0x31, 0xbf, 0xe6, 0x7a, 0xf5, 0xc3, 0x83, 0xd9, - 0x49, 0xa3, 0x39, 0x14, 0x88, 0x19, 0xa9, 0xfd, 0xdf, 0x2d, 0x98, 0x65, 0xb8, 0x55, 0xb7, 0x41, - 0x2a, 0x24, 0x08, 0xdd, 0x30, 0x22, 0x5e, 0x64, 0x74, 0xe8, 0x73, 0x00, 0x21, 0xa9, 0x05, 0x24, - 0xd2, 0xba, 0x54, 0x4d, 0x8c, 0xaa, 0xc2, 0x60, 0x8d, 0x8a, 0x6e, 0x08, 0xe1, 0x8e, 0x13, 0xb0, - 0xf9, 0x25, 0x3a, 0x56, 0x6d, 0x08, 0x55, 0x89, 0xc0, 0x31, 0x8d, 0xb1, 0x21, 0x14, 0xbb, 0x6d, - 0x08, 0xe8, 0x43, 0x30, 0x1e, 0x57, 0x16, 0xb6, 0x9c, 0x9a, 0xec, 0x40, 0xb6, 0x64, 0xaa, 0x26, - 0x0a, 0x27, 0x69, 0xed, 0xbf, 0x69, 0x89, 0xc9, 0x43, 0xbf, 0xfa, 0x1d, 0xfe, 0xad, 0xf6, 0x2f, - 0x58, 0x30, 0xb8, 0xe8, 0x7a, 0x75, 0xd7, 0xdb, 0x46, 0x9f, 0x82, 0x21, 0x7a, 0x36, 0xd5, 0x9d, - 0xc8, 0x11, 0xfb, 0xde, 0xfb, 0xb4, 0xb5, 0xa5, 0x8e, 0x8a, 0xb9, 0xd6, 0xee, 0x36, 0x05, 0x84, - 0x73, 0x94, 0x9a, 0xae, 0xb6, 0x1b, 0x9b, 0x9f, 0x26, 0xb5, 0x68, 0x8d, 0x44, 0x4e, 0xfc, 0x39, - 0x31, 0x0c, 0x2b, 0xae, 0xe8, 0x1a, 0x0c, 0x44, 0x4e, 0xb0, 0x4d, 0x22, 0xb1, 0x01, 0x66, 0x6e, - 0x54, 0xbc, 0x24, 0xa6, 0x2b, 0x92, 0x78, 0x35, 0x12, 0x1f, 0x0b, 0x1b, 0xac, 0x28, 0x16, 0x2c, - 0xec, 0xff, 0x3b, 0x08, 0x67, 0x97, 0xaa, 0xe5, 0x9c, 0x79, 0x75, 0x11, 0x06, 0xea, 0x81, 0xbb, - 0x47, 0x02, 0xd1, 0xcf, 0x8a, 0xcb, 0x32, 0x83, 0x62, 0x81, 0x45, 0x2f, 0xc1, 0x08, 0x3f, 0x90, - 0xae, 0x3a, 0x5e, 0xbd, 0x21, 0xbb, 0xf8, 0x94, 0xa0, 0x1e, 0xb9, 0xa5, 0xe1, 0xb0, 0x41, 0x79, - 0xc4, 0x49, 0x75, 0x31, 0xb1, 0x18, 0xf3, 0x0e, 0xbb, 0xcf, 0x5b, 0x30, 0xc1, 0xab, 0x59, 0x88, - 0xa2, 0xc0, 0xdd, 0x6c, 0x47, 0x24, 0x9c, 0xee, 0x67, 0x3b, 0xdd, 0x52, 0x56, 0x6f, 0xe5, 0xf6, - 0xc0, 0xdc, 0xad, 0x04, 0x17, 0xbe, 0x09, 0x4e, 0x8b, 0x7a, 0x27, 0x92, 0x68, 0x9c, 0xaa, 0x16, - 0x7d, 0xb7, 0x05, 0x33, 0x35, 0xdf, 0x8b, 0x02, 0xbf, 0xd1, 0x20, 0x41, 0xa5, 0xbd, 0xd9, 0x70, - 0xc3, 0x1d, 0x3e, 0x4f, 0x31, 0xd9, 0x62, 0x3b, 0x41, 0xce, 0x18, 0x2a, 0x22, 0x31, 0x86, 0xe7, - 0xef, 0x1d, 0xcc, 0xce, 0x2c, 0xe5, 0xb2, 0xc2, 0x1d, 0xaa, 0x41, 0xbb, 0x80, 0xe8, 0x51, 0x5a, - 0x8d, 0x9c, 0x6d, 0x12, 0x57, 0x3e, 0xd8, 0x7b, 0xe5, 0x67, 0xee, 0x1d, 0xcc, 0xa2, 0xf5, 0x14, - 0x0b, 0x9c, 0xc1, 0x16, 0xbd, 0x09, 0xa7, 0x28, 0x34, 0xf5, 0xad, 0x43, 0xbd, 0x57, 0x37, 0x7d, - 0xef, 0x60, 0xf6, 0xd4, 0x7a, 0x06, 0x13, 0x9c, 0xc9, 0x1a, 0x7d, 0x97, 0x05, 0x67, 0xe3, 0xcf, - 0x5f, 0xb9, 0xdb, 0x72, 0xbc, 0x7a, 0x5c, 0x71, 0xa9, 0xf7, 0x8a, 0xe9, 0x9e, 0x7c, 0x76, 0x29, - 0x8f, 0x13, 0xce, 0xaf, 0x04, 0x79, 0x30, 0x45, 0x9b, 0x96, 0xac, 0x1b, 0x7a, 0xaf, 0xfb, 0xa1, - 0x7b, 0x07, 0xb3, 0x53, 0xeb, 0x69, 0x1e, 0x38, 0x8b, 0xf1, 0xcc, 0x12, 0x9c, 0xce, 0x9c, 0x9d, - 0x68, 0x02, 0x8a, 0xbb, 0x84, 0x4b, 0x5d, 0x25, 0x4c, 0x7f, 0xa2, 0x53, 0xd0, 0xbf, 0xe7, 0x34, - 0xda, 0x62, 0x61, 0x62, 0xfe, 0xe7, 0xe5, 0xc2, 0x4b, 0x96, 0xfd, 0xcf, 0x8b, 0x30, 0xbe, 0x54, - 0x2d, 0xdf, 0xd7, 0xaa, 0xd7, 0x8f, 0xbd, 0x42, 0xc7, 0x63, 0x2f, 0x3e, 0x44, 0x8b, 0xb9, 0x87, - 0xe8, 0x9f, 0xcd, 0x58, 0xb2, 0x7d, 0x6c, 0xc9, 0x7e, 0x47, 0xce, 0x92, 0x3d, 0xe6, 0x85, 0xba, - 0x97, 0x33, 0x6b, 0xfb, 0xd9, 0x00, 0x66, 0x4a, 0x48, 0xd7, 0xfd, 0x9a, 0xd3, 0x48, 0x6e, 0xb5, - 0x47, 0x9c, 0xba, 0xc7, 0x33, 0x8e, 0x35, 0x18, 0x59, 0x72, 0x5a, 0xce, 0xa6, 0xdb, 0x70, 0x23, - 0x97, 0x84, 0xe8, 0x49, 0x28, 0x3a, 0xf5, 0x3a, 0x93, 0xee, 0x4a, 0x8b, 0xa7, 0xef, 0x1d, 0xcc, - 0x16, 0x17, 0xea, 0x54, 0xcc, 0x00, 0x45, 0xb5, 0x8f, 0x29, 0x05, 0x7a, 0x2f, 0xf4, 0xd5, 0x03, - 0xbf, 0x35, 0x5d, 0x60, 0x94, 0x74, 0x95, 0xf7, 0x2d, 0x07, 0x7e, 0x2b, 0x41, 0xca, 0x68, 0xec, - 0x5f, 0x2d, 0xc0, 0x23, 0x4b, 0xa4, 
0xb5, 0xb3, 0x5a, 0xcd, 0x39, 0x2f, 0x2e, 0xc1, 0x50, 0xd3, - 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0x6b, 0x02, 0x86, 0x15, 0x16, 0x5d, 0x80, - 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, 0x43, 0x29, 0xda, 0x21, 0x09, - 0xc4, 0x8c, 0x51, 0x14, 0x37, 0x43, 0x12, 0x60, 0x86, 0x89, 0x25, 0x01, 0x2a, 0x23, 0x88, 0x13, - 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, 0x62, 0x64, 0x7b, 0x5a, 0x9a, - 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, 0xae, 0xa2, 0xc2, 0xd7, 0x0a, - 0x80, 0x78, 0x17, 0x7e, 0x8b, 0x75, 0xdc, 0xcd, 0x74, 0xc7, 0xf5, 0xbe, 0x24, 0x8e, 0xab, 0xf7, - 0xfe, 0x87, 0x05, 0x8f, 0x2c, 0xb9, 0x5e, 0x9d, 0x04, 0x39, 0x13, 0xf0, 0xc1, 0xdc, 0x9d, 0x8f, - 0x26, 0xa4, 0x18, 0x53, 0xac, 0xef, 0x18, 0xa6, 0x98, 0xfd, 0x47, 0x16, 0x20, 0xfe, 0xd9, 0xef, - 0xb8, 0x8f, 0xbd, 0x99, 0xfe, 0xd8, 0x63, 0x98, 0x16, 0xf6, 0xdf, 0xb5, 0x60, 0x78, 0xa9, 0xe1, - 0xb8, 0x4d, 0xf1, 0xa9, 0x4b, 0x30, 0x29, 0x15, 0x45, 0x0c, 0xac, 0xc9, 0xfe, 0x74, 0x73, 0x9b, - 0xc4, 0x49, 0x24, 0x4e, 0xd3, 0xa3, 0x8f, 0xc3, 0x59, 0x03, 0xb8, 0x41, 0x9a, 0xad, 0x86, 0x13, - 0xe9, 0xb7, 0x02, 0x76, 0xfa, 0xe3, 0x3c, 0x22, 0x9c, 0x5f, 0xde, 0xbe, 0x0e, 0x63, 0x4b, 0x0d, - 0x97, 0x78, 0x51, 0xb9, 0xb2, 0xe4, 0x7b, 0x5b, 0xee, 0x36, 0x7a, 0x19, 0xc6, 0x22, 0xb7, 0x49, - 0xfc, 0x76, 0x54, 0x25, 0x35, 0xdf, 0x63, 0x77, 0x6d, 0xeb, 0x52, 0xff, 0x22, 0xba, 0x77, 0x30, - 0x3b, 0xb6, 0x61, 0x60, 0x70, 0x82, 0xd2, 0xfe, 0x1d, 0x3a, 0xe2, 0x7e, 0xb3, 0xe5, 0x7b, 0xc4, - 0x8b, 0x96, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0x2f, 0x43, 0x5f, 0x44, 0x47, 0x90, 0x7f, 0xf9, 0x45, - 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0x1e, 0xcc, 0x9e, 0x49, 0x97, 0x60, 0x23, 0xcb, 0xca, 0xa0, 0xef, - 0x80, 0x81, 0x30, 0x72, 0xa2, 0x76, 0x28, 0x3e, 0xf5, 0x51, 0x39, 0xfe, 0x55, 0x06, 0x3d, 0x3c, - 0x98, 0x1d, 0x57, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x29, 0x18, 0x6c, 0x92, 0x30, 0x74, 0xb6, - 0xe5, 0xf9, 0x3d, 0x2e, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x18, 0xf4, 0x93, 0x20, - 0xf0, 0x03, 0xb1, 0xab, 0x8c, 0x0a, 0xc2, 0xfe, 0x15, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x5b, 0x0b, - 0xc6, 0x55, 0x5b, 0x79, 0x5d, 0x27, 0x70, 0x6f, 0xfa, 0x18, 0x40, 0x4d, 0x7e, 0x60, 0xc8, 0xce, - 0xbb, 0xe1, 0xe7, 0x2e, 0x66, 0x8a, 0x16, 0xa9, 0x6e, 0x8c, 0x39, 0x2b, 0x50, 0x88, 0x35, 0x6e, - 0xf6, 0x3f, 0xb2, 0x60, 0x2a, 0xf1, 0x45, 0xd7, 0xdd, 0x30, 0x42, 0x6f, 0xa4, 0xbe, 0x6a, 0xae, - 0xb7, 0xaf, 0xa2, 0xa5, 0xd9, 0x37, 0xa9, 0xc5, 0x27, 0x21, 0xda, 0x17, 0x5d, 0x85, 0x7e, 0x37, - 0x22, 0x4d, 0xf9, 0x31, 0x8f, 0x75, 0xfc, 0x18, 0xde, 0xaa, 0x78, 0x44, 0xca, 0xb4, 0x24, 0xe6, - 0x0c, 0xec, 0x5f, 0x2d, 0x42, 0x89, 0x4f, 0xdb, 0x35, 0xa7, 0x75, 0x02, 0x63, 0xf1, 0x34, 0x94, - 0xdc, 0x66, 0xb3, 0x1d, 0x39, 0x9b, 0xe2, 0x00, 0x1a, 0xe2, 0x9b, 0x41, 0x59, 0x02, 0x71, 0x8c, - 0x47, 0x65, 0xe8, 0x63, 0x4d, 0xe1, 0x5f, 0xf9, 0x64, 0xf6, 0x57, 0x8a, 0xb6, 0xcf, 0x2d, 0x3b, - 0x91, 0xc3, 0x65, 0x3f, 0x75, 0xf2, 0x51, 0x10, 0x66, 0x2c, 0x90, 0x03, 0xb0, 0xe9, 0x7a, 0x4e, - 0xb0, 0x4f, 0x61, 0xd3, 0x45, 0xc6, 0xf0, 0xd9, 0xce, 0x0c, 0x17, 0x15, 0x3d, 0x67, 0xab, 0x3e, - 0x2c, 0x46, 0x60, 0x8d, 0xe9, 0xcc, 0x07, 0xa0, 0xa4, 0x88, 0x8f, 0x22, 0xc2, 0xcd, 0x7c, 0x08, - 0xc6, 0x13, 0x75, 0x75, 0x2b, 0x3e, 0xa2, 0x4b, 0x80, 0xbf, 0xc8, 0xb6, 0x0c, 0xd1, 0xea, 0x15, - 0x6f, 0x4f, 0xec, 0x9c, 0x6f, 0xc1, 0xa9, 0x46, 0xc6, 0xde, 0x2b, 0xc6, 0xb5, 0xf7, 0xbd, 0xfa, - 0x11, 0xf1, 0xd9, 0xa7, 0xb2, 0xb0, 0x38, 0xb3, 0x0e, 0x2a, 0xd5, 0xf8, 0x2d, 0xba, 0x40, 0x9c, - 0x86, 0x7e, 0x41, 0xb8, 0x21, 0x60, 0x58, 0x61, 0xe9, 0x7e, 
0x77, 0x4a, 0x35, 0xfe, 0x1a, 0xd9, - 0xaf, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0xf0, 0x4d, 0x6d, 0xfe, 0x39, 0xde, 0xfb, 0x7c, 0xbb, 0x1c, - 0x16, 0x0c, 0x8a, 0xd7, 0xc8, 0x3e, 0x1f, 0x0a, 0xfd, 0xeb, 0x8a, 0x1d, 0xbf, 0xee, 0x2b, 0x16, - 0x8c, 0xaa, 0xaf, 0x3b, 0x81, 0x7d, 0x61, 0xd1, 0xdc, 0x17, 0xce, 0x75, 0x9c, 0xe0, 0x39, 0x3b, - 0xc2, 0xd7, 0x0a, 0x70, 0x56, 0xd1, 0xd0, 0xdb, 0x0c, 0xff, 0x23, 0x66, 0xd5, 0x3c, 0x94, 0x3c, - 0xa5, 0xd7, 0xb3, 0x4c, 0x85, 0x5a, 0xac, 0xd5, 0x8b, 0x69, 0xa8, 0x50, 0xea, 0xc5, 0xc7, 0xec, - 0x88, 0xae, 0xf0, 0x16, 0xca, 0xed, 0x45, 0x28, 0xb6, 0xdd, 0xba, 0x38, 0x60, 0xde, 0x27, 0x7b, - 0xfb, 0x66, 0x79, 0xf9, 0xf0, 0x60, 0xf6, 0xd1, 0x3c, 0x63, 0x0b, 0x3d, 0xd9, 0xc2, 0xb9, 0x9b, - 0xe5, 0x65, 0x4c, 0x0b, 0xa3, 0x05, 0x18, 0x97, 0x27, 0xf4, 0x2d, 0x2a, 0x20, 0xfa, 0x9e, 0x38, - 0x87, 0x94, 0xd6, 0x1a, 0x9b, 0x68, 0x9c, 0xa4, 0x47, 0xcb, 0x30, 0xb1, 0xdb, 0xde, 0x24, 0x0d, - 0x12, 0xf1, 0x0f, 0xbe, 0x46, 0xb8, 0x4e, 0xb7, 0x14, 0xdf, 0x25, 0xaf, 0x25, 0xf0, 0x38, 0x55, - 0xc2, 0xfe, 0x53, 0x76, 0x1e, 0x88, 0xde, 0xab, 0x04, 0x3e, 0x9d, 0x58, 0x94, 0xfb, 0x37, 0x73, - 0x3a, 0xf7, 0x32, 0x2b, 0xae, 0x91, 0xfd, 0x0d, 0x9f, 0xde, 0x25, 0xb2, 0x67, 0x85, 0x31, 0xe7, - 0xfb, 0x3a, 0xce, 0xf9, 0x9f, 0x2d, 0xc0, 0x69, 0xd5, 0x03, 0x86, 0xd8, 0xfa, 0xad, 0xde, 0x07, - 0x97, 0x61, 0xb8, 0x4e, 0xb6, 0x9c, 0x76, 0x23, 0x52, 0x06, 0x86, 0x7e, 0x6e, 0x64, 0x5a, 0x8e, - 0xc1, 0x58, 0xa7, 0x39, 0x42, 0xb7, 0xfd, 0xcf, 0x61, 0x76, 0x10, 0x47, 0x0e, 0x9d, 0xe3, 0x6a, - 0xd5, 0x58, 0xb9, 0xab, 0xe6, 0x31, 0xe8, 0x77, 0x9b, 0x54, 0x30, 0x2b, 0x98, 0xf2, 0x56, 0x99, - 0x02, 0x31, 0xc7, 0xa1, 0x27, 0x60, 0xb0, 0xe6, 0x37, 0x9b, 0x8e, 0x57, 0x67, 0x47, 0x5e, 0x69, - 0x71, 0x98, 0xca, 0x6e, 0x4b, 0x1c, 0x84, 0x25, 0x0e, 0x3d, 0x02, 0x7d, 0x4e, 0xb0, 0xcd, 0xb5, - 0x2e, 0xa5, 0xc5, 0x21, 0x5a, 0xd3, 0x42, 0xb0, 0x1d, 0x62, 0x06, 0xa5, 0x97, 0xc6, 0x3b, 0x7e, - 0xb0, 0xeb, 0x7a, 0xdb, 0xcb, 0x6e, 0x20, 0x96, 0x84, 0x3a, 0x0b, 0x6f, 0x2b, 0x0c, 0xd6, 0xa8, - 0xd0, 0x2a, 0xf4, 0xb7, 0xfc, 0x20, 0x0a, 0xa7, 0x07, 0x58, 0x77, 0x3f, 0x9a, 0xb3, 0x11, 0xf1, - 0xaf, 0xad, 0xf8, 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1c, 0x5d, 0x87, 0x41, 0xe2, - 0xed, 0xad, 0x06, 0x7e, 0x73, 0x7a, 0x2a, 0x9f, 0xd3, 0x0a, 0x27, 0xe1, 0xd3, 0x2c, 0x96, 0x51, - 0x05, 0x18, 0x4b, 0x16, 0xe8, 0x3b, 0xa0, 0x48, 0xbc, 0xbd, 0xe9, 0x41, 0xc6, 0x69, 0x26, 0x87, - 0xd3, 0x2d, 0x27, 0x88, 0xf7, 0xfc, 0x15, 0x6f, 0x0f, 0xd3, 0x32, 0xe8, 0xa3, 0x50, 0x92, 0x1b, - 0x46, 0x28, 0xd4, 0x99, 0x99, 0x13, 0x56, 0x6e, 0x33, 0x98, 0xbc, 0xd9, 0x76, 0x03, 0xd2, 0x24, - 0x5e, 0x14, 0xc6, 0x3b, 0xa4, 0xc4, 0x86, 0x38, 0xe6, 0x86, 0x3e, 0x2a, 0x75, 0xe8, 0x6b, 0x7e, - 0xdb, 0x8b, 0xc2, 0xe9, 0x12, 0x6b, 0x5e, 0xa6, 0x75, 0xf3, 0x56, 0x4c, 0x97, 0x54, 0xb2, 0xf3, - 0xc2, 0xd8, 0x60, 0x85, 0x3e, 0x01, 0xa3, 0xfc, 0x3f, 0xb7, 0x11, 0x86, 0xd3, 0xa7, 0x19, 0xef, - 0x0b, 0xf9, 0xbc, 0x39, 0xe1, 0xe2, 0x69, 0xc1, 0x7c, 0x54, 0x87, 0x86, 0xd8, 0xe4, 0x86, 0x30, - 0x8c, 0x36, 0xdc, 0x3d, 0xe2, 0x91, 0x30, 0xac, 0x04, 0xfe, 0x26, 0x11, 0x2a, 0xcf, 0xb3, 0xd9, - 0x36, 0x45, 0x7f, 0x93, 0x2c, 0x4e, 0x52, 0x9e, 0xd7, 0xf5, 0x32, 0xd8, 0x64, 0x81, 0x6e, 0xc2, - 0x18, 0xbd, 0x63, 0xba, 0x31, 0xd3, 0xe1, 0x6e, 0x4c, 0xd9, 0xbd, 0x0a, 0x1b, 0x85, 0x70, 0x82, - 0x09, 0xba, 0x01, 0x23, 0x61, 0xe4, 0x04, 0x51, 0xbb, 0xc5, 0x99, 0x9e, 0xe9, 0xc6, 0x94, 0x99, - 0xa4, 0xab, 0x5a, 0x11, 0x6c, 0x30, 0x40, 0xaf, 0x41, 0xa9, 0xe1, 0x6e, 0x91, 0xda, 0x7e, 0xad, - 0x41, 0xa6, 0x47, 0x18, 0xb7, 0xcc, 0x4d, 0xe5, 0xba, 0x24, 0xe2, 0x72, 0xae, 0xfa, 
	[... large run of removed lines containing only gzipped fileDescriptor byte literals (0x.., 0x.., ...) from a regenerated protobuf-generated Go file, most likely vendor/github.com/prometheus/client_model/go/metrics.pb.go per the patch file list; the raw byte contents are elided here as they carry no reviewable information ...]
0x74, 0xb0, - 0x59, 0x17, 0x7e, 0x07, 0x8f, 0xe5, 0x64, 0x42, 0xef, 0x90, 0xff, 0x7c, 0x23, 0x23, 0xeb, 0xf6, - 0xe3, 0x79, 0x59, 0xb7, 0x7b, 0xce, 0xb5, 0xfd, 0x9a, 0xca, 0x81, 0x3e, 0x9a, 0x3f, 0x95, 0x78, - 0x86, 0xf3, 0xae, 0x99, 0xcf, 0x5f, 0x53, 0x99, 0xcf, 0x3b, 0x84, 0xd7, 0xe6, 0x79, 0xcd, 0xbb, - 0xe6, 0x3b, 0xd7, 0x72, 0x96, 0x8f, 0x1f, 0x4f, 0xce, 0x72, 0xe3, 0xa8, 0xe1, 0x69, 0xb3, 0x9f, - 0xee, 0x72, 0xd4, 0x18, 0x7c, 0x3b, 0x1f, 0x36, 0x3c, 0x3f, 0xfb, 0xe4, 0x7d, 0xe5, 0x67, 0xbf, - 0xa5, 0xe7, 0x3b, 0x47, 0x5d, 0x12, 0x7a, 0x53, 0xa2, 0x1e, 0xb3, 0x9c, 0xdf, 0xd2, 0x0f, 0xc0, - 0xa9, 0x7c, 0xbe, 0xea, 0x9c, 0x4b, 0xf3, 0xcd, 0x3c, 0x02, 0x53, 0xd9, 0xd3, 0x4f, 0x9d, 0x4c, - 0xf6, 0xf4, 0xd3, 0xc7, 0x9e, 0x3d, 0xfd, 0xcc, 0x09, 0x64, 0x4f, 0x7f, 0xe8, 0x04, 0xb3, 0xa7, - 0xdf, 0x62, 0x4e, 0x52, 0x3c, 0x10, 0x94, 0x08, 0x07, 0xfe, 0x54, 0x4e, 0x1c, 0xb5, 0x74, 0xb4, - 0x28, 0xfe, 0x71, 0x0a, 0x85, 0x63, 0x56, 0x19, 0x59, 0xd9, 0xa7, 0x1f, 0x40, 0x56, 0xf6, 0xf5, - 0x38, 0x2b, 0xfb, 0xd9, 0xfc, 0xa1, 0xce, 0x78, 0x56, 0x93, 0x93, 0x8b, 0xfd, 0x96, 0x9e, 0x43, - 0xfd, 0xe1, 0x0e, 0x96, 0xb0, 0x2c, 0x85, 0x72, 0x87, 0xcc, 0xe9, 0xaf, 0xf2, 0xcc, 0xe9, 0x8f, - 0xe4, 0xef, 0xe4, 0xc9, 0xe3, 0xce, 0xc8, 0x97, 0x4e, 0xdb, 0xa5, 0x02, 0xa9, 0xb2, 0xc0, 0xe7, - 0x39, 0xed, 0x52, 0x91, 0x58, 0xd3, 0xed, 0x52, 0x28, 0x1c, 0xb3, 0xb2, 0x7f, 0xa0, 0x00, 0xe7, - 0x3b, 0xaf, 0xb7, 0x58, 0x4b, 0x5e, 0x89, 0x1d, 0x03, 0x12, 0x5a, 0x72, 0x7e, 0x67, 0x8b, 0xa9, - 0x7a, 0x8e, 0x0b, 0x79, 0x05, 0x26, 0xd5, 0x7b, 0x9c, 0x86, 0x5b, 0xdb, 0x5f, 0x8f, 0xaf, 0xc9, - 0x2a, 0x82, 0x42, 0x35, 0x49, 0x80, 0xd3, 0x65, 0xd0, 0x02, 0x8c, 0x1b, 0xc0, 0xf2, 0xb2, 0xb8, - 0x9b, 0xc5, 0xa1, 0xb6, 0x4d, 0x34, 0x4e, 0xd2, 0xdb, 0x5f, 0xb2, 0xe0, 0xa1, 0x9c, 0xb4, 0xa3, - 0x3d, 0x87, 0x3d, 0xdc, 0x82, 0xf1, 0x96, 0x59, 0xb4, 0x4b, 0xa4, 0x56, 0x23, 0xb9, 0xa9, 0x6a, - 0x6b, 0x02, 0x81, 0x93, 0x4c, 0xed, 0x9f, 0x2e, 0xc0, 0xb9, 0x8e, 0x0e, 0xa6, 0x08, 0xc3, 0x99, - 0xed, 0x66, 0xe8, 0x2c, 0x05, 0xa4, 0x4e, 0xbc, 0xc8, 0x75, 0x1a, 0xd5, 0x16, 0xa9, 0x69, 0x76, - 0x0e, 0xe6, 0xa9, 0x79, 0x65, 0xad, 0xba, 0x90, 0xa6, 0xc0, 0x39, 0x25, 0xd1, 0x2a, 0xa0, 0x34, - 0x46, 0x8c, 0x30, 0x0b, 0xa1, 0x9f, 0xe6, 0x87, 0x33, 0x4a, 0xa0, 0x0f, 0xc0, 0xa8, 0x72, 0x5c, - 0xd5, 0x46, 0x9c, 0x6d, 0xec, 0x58, 0x47, 0x60, 0x93, 0x0e, 0x5d, 0xe6, 0x39, 0x18, 0x44, 0xb6, - 0x0e, 0x61, 0x14, 0x19, 0x97, 0x09, 0x16, 0x04, 0x18, 0xeb, 0x34, 0x8b, 0x2f, 0xfd, 0xda, 0xef, - 0x9d, 0x7f, 0xcf, 0x6f, 0xfc, 0xde, 0xf9, 0xf7, 0xfc, 0xf6, 0xef, 0x9d, 0x7f, 0xcf, 0x77, 0xdd, - 0x3b, 0x6f, 0xfd, 0xda, 0xbd, 0xf3, 0xd6, 0x6f, 0xdc, 0x3b, 0x6f, 0xfd, 0xf6, 0xbd, 0xf3, 0xd6, - 0xef, 0xde, 0x3b, 0x6f, 0x7d, 0xe1, 0xf7, 0xcf, 0xbf, 0xe7, 0x63, 0x28, 0x0e, 0x24, 0x3a, 0x4f, - 0x47, 0x67, 0x7e, 0xef, 0xf2, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x52, 0x56, 0xa0, 0x3b, 0xf7, - 0x0c, 0x01, 0x00, + // 14685 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x5c, 0xd7, + 0x75, 0x18, 0xac, 0xd7, 0x3d, 0x5b, 0x9f, 0xd9, 0xef, 0x00, 0xe0, 0x60, 0x48, 0xa0, 0xc1, 0x47, + 0x12, 0x04, 0x45, 0x72, 0x20, 0x70, 0x91, 0x28, 0x52, 0xa2, 0x35, 0x2b, 0x30, 0x04, 0x66, 0xd0, + 0xbc, 0x3d, 0x00, 0x24, 0x8a, 0x52, 0xe9, 0x4d, 0xf7, 0x9d, 0x99, 0xa7, 0xe9, 0x7e, 0xaf, 0xf9, + 0xde, 0xeb, 0x01, 0x06, 0x9f, 0x54, 0x9f, 0x2d, 0xc7, 0x8b, 0x6c, 0x27, 0xa5, 0x4a, 0x39, 0x4b, + 0xc9, 0x2e, 0x57, 0xca, 0x76, 0x6c, 0x2b, 0xca, 0xa6, 0xc8, 0xb1, 0x1d, 0xcb, 0x5b, 0xb6, 0x8a, + 0x93, 0x4a, 0x39, 0x8e, 0xab, 0x62, 
0xb9, 0xe2, 0xca, 0xc4, 0x82, 0x53, 0xe5, 0x52, 0x55, 0x62, + 0x3b, 0xcb, 0x8f, 0x64, 0xe2, 0xc4, 0xa9, 0xbb, 0xbe, 0x7b, 0xdf, 0xd2, 0xdd, 0x03, 0x0e, 0x46, + 0x94, 0x8a, 0xff, 0xba, 0xcf, 0x39, 0xf7, 0xdc, 0xfb, 0xee, 0x7a, 0xee, 0x39, 0xe7, 0x9e, 0x03, + 0xaf, 0xec, 0xbc, 0x14, 0xce, 0xba, 0xfe, 0xc5, 0x9d, 0xf6, 0x06, 0x09, 0x3c, 0x12, 0x91, 0xf0, + 0xe2, 0x2e, 0xf1, 0xea, 0x7e, 0x70, 0x51, 0x20, 0x9c, 0x96, 0x7b, 0xb1, 0xe6, 0x07, 0xe4, 0xe2, + 0xee, 0xa5, 0x8b, 0x5b, 0xc4, 0x23, 0x81, 0x13, 0x91, 0xfa, 0x6c, 0x2b, 0xf0, 0x23, 0x1f, 0x21, + 0x4e, 0x33, 0xeb, 0xb4, 0xdc, 0x59, 0x4a, 0x33, 0xbb, 0x7b, 0x69, 0xe6, 0xd9, 0x2d, 0x37, 0xda, + 0x6e, 0x6f, 0xcc, 0xd6, 0xfc, 0xe6, 0xc5, 0x2d, 0x7f, 0xcb, 0xbf, 0xc8, 0x48, 0x37, 0xda, 0x9b, + 0xec, 0x1f, 0xfb, 0xc3, 0x7e, 0x71, 0x16, 0x33, 0x2f, 0xc4, 0xd5, 0x34, 0x9d, 0xda, 0xb6, 0xeb, + 0x91, 0x60, 0xef, 0x62, 0x6b, 0x67, 0x8b, 0xd5, 0x1b, 0x90, 0xd0, 0x6f, 0x07, 0x35, 0x92, 0xac, + 0xb8, 0x63, 0xa9, 0xf0, 0x62, 0x93, 0x44, 0x4e, 0x46, 0x73, 0x67, 0x2e, 0xe6, 0x95, 0x0a, 0xda, + 0x5e, 0xe4, 0x36, 0xd3, 0xd5, 0xbc, 0xbf, 0x5b, 0x81, 0xb0, 0xb6, 0x4d, 0x9a, 0x4e, 0xaa, 0xdc, + 0xf3, 0x79, 0xe5, 0xda, 0x91, 0xdb, 0xb8, 0xe8, 0x7a, 0x51, 0x18, 0x05, 0xc9, 0x42, 0xf6, 0xd7, + 0x2d, 0x38, 0x37, 0x77, 0xab, 0xba, 0xd4, 0x70, 0xc2, 0xc8, 0xad, 0xcd, 0x37, 0xfc, 0xda, 0x4e, + 0x35, 0xf2, 0x03, 0x72, 0xd3, 0x6f, 0xb4, 0x9b, 0xa4, 0xca, 0x3a, 0x02, 0x3d, 0x03, 0x43, 0xbb, + 0xec, 0xff, 0xca, 0xe2, 0xb4, 0x75, 0xce, 0xba, 0x50, 0x9a, 0x9f, 0xf8, 0xcd, 0xfd, 0xf2, 0x7b, + 0xee, 0xed, 0x97, 0x87, 0x6e, 0x0a, 0x38, 0x56, 0x14, 0xe8, 0x3c, 0x0c, 0x6c, 0x86, 0xeb, 0x7b, + 0x2d, 0x32, 0x5d, 0x60, 0xb4, 0x63, 0x82, 0x76, 0x60, 0xb9, 0x4a, 0xa1, 0x58, 0x60, 0xd1, 0x45, + 0x28, 0xb5, 0x9c, 0x20, 0x72, 0x23, 0xd7, 0xf7, 0xa6, 0x8b, 0xe7, 0xac, 0x0b, 0xfd, 0xf3, 0x93, + 0x82, 0xb4, 0x54, 0x91, 0x08, 0x1c, 0xd3, 0xd0, 0x66, 0x04, 0xc4, 0xa9, 0x5f, 0xf7, 0x1a, 0x7b, + 0xd3, 0x7d, 0xe7, 0xac, 0x0b, 0x43, 0x71, 0x33, 0xb0, 0x80, 0x63, 0x45, 0x61, 0x7f, 0xb1, 0x00, + 0x43, 0x73, 0x9b, 0x9b, 0xae, 0xe7, 0x46, 0x7b, 0xe8, 0x26, 0x8c, 0x78, 0x7e, 0x9d, 0xc8, 0xff, + 0xec, 0x2b, 0x86, 0x9f, 0x3b, 0x37, 0x9b, 0x9e, 0x4a, 0xb3, 0x6b, 0x1a, 0xdd, 0xfc, 0xc4, 0xbd, + 0xfd, 0xf2, 0x88, 0x0e, 0xc1, 0x06, 0x1f, 0x84, 0x61, 0xb8, 0xe5, 0xd7, 0x15, 0xdb, 0x02, 0x63, + 0x5b, 0xce, 0x62, 0x5b, 0x89, 0xc9, 0xe6, 0xc7, 0xef, 0xed, 0x97, 0x87, 0x35, 0x00, 0xd6, 0x99, + 0xa0, 0x0d, 0x18, 0xa7, 0x7f, 0xbd, 0xc8, 0x55, 0x7c, 0x8b, 0x8c, 0xef, 0x63, 0x79, 0x7c, 0x35, + 0xd2, 0xf9, 0xa9, 0x7b, 0xfb, 0xe5, 0xf1, 0x04, 0x10, 0x27, 0x19, 0xda, 0x77, 0x61, 0x6c, 0x2e, + 0x8a, 0x9c, 0xda, 0x36, 0xa9, 0xf3, 0x11, 0x44, 0x2f, 0x40, 0x9f, 0xe7, 0x34, 0x89, 0x18, 0xdf, + 0x73, 0xa2, 0x63, 0xfb, 0xd6, 0x9c, 0x26, 0x39, 0xd8, 0x2f, 0x4f, 0xdc, 0xf0, 0xdc, 0xb7, 0xda, + 0x62, 0x56, 0x50, 0x18, 0x66, 0xd4, 0xe8, 0x39, 0x80, 0x3a, 0xd9, 0x75, 0x6b, 0xa4, 0xe2, 0x44, + 0xdb, 0x62, 0xbc, 0x91, 0x28, 0x0b, 0x8b, 0x0a, 0x83, 0x35, 0x2a, 0xfb, 0x0e, 0x94, 0xe6, 0x76, + 0x7d, 0xb7, 0x5e, 0xf1, 0xeb, 0x21, 0xda, 0x81, 0xf1, 0x56, 0x40, 0x36, 0x49, 0xa0, 0x40, 0xd3, + 0xd6, 0xb9, 0xe2, 0x85, 0xe1, 0xe7, 0x2e, 0x64, 0x7e, 0xac, 0x49, 0xba, 0xe4, 0x45, 0xc1, 0xde, + 0xfc, 0x43, 0xa2, 0xbe, 0xf1, 0x04, 0x16, 0x27, 0x39, 0xdb, 0xff, 0xac, 0x00, 0x27, 0xe7, 0xee, + 0xb6, 0x03, 0xb2, 0xe8, 0x86, 0x3b, 0xc9, 0x19, 0x5e, 0x77, 0xc3, 0x9d, 0xb5, 0xb8, 0x07, 0xd4, + 0xd4, 0x5a, 0x14, 0x70, 0xac, 0x28, 0xd0, 0xb3, 0x30, 0x48, 0x7f, 0xdf, 0xc0, 0x2b, 0xe2, 0x93, + 0xa7, 0x04, 0xf1, 0xf0, 0xa2, 0x13, 0x39, 0x8b, 0x1c, 0x85, 
0x25, 0x0d, 0x5a, 0x85, 0xe1, 0x1a, + 0x5b, 0x90, 0x5b, 0xab, 0x7e, 0x9d, 0xb0, 0xc1, 0x2c, 0xcd, 0x3f, 0x4d, 0xc9, 0x17, 0x62, 0xf0, + 0xc1, 0x7e, 0x79, 0x9a, 0xb7, 0x4d, 0xb0, 0xd0, 0x70, 0x58, 0x2f, 0x8f, 0x6c, 0xb5, 0xbe, 0xfa, + 0x18, 0x27, 0xc8, 0x58, 0x5b, 0x17, 0xb4, 0xa5, 0xd2, 0xcf, 0x96, 0xca, 0x48, 0xf6, 0x32, 0x41, + 0x97, 0xa0, 0x6f, 0xc7, 0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd7, 0x19, 0x3a, 0xe6, 0x57, 0x5d, 0xaf, + 0x7e, 0xb0, 0x5f, 0x9e, 0x34, 0x9a, 0x43, 0x81, 0x98, 0x91, 0xda, 0xff, 0xdd, 0x82, 0x32, 0xc3, + 0x2d, 0xbb, 0x0d, 0x52, 0x21, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0xa3, 0x43, 0x9f, 0x03, 0x08, + 0x49, 0x2d, 0x20, 0x91, 0xd6, 0xa5, 0x6a, 0x62, 0x54, 0x15, 0x06, 0x6b, 0x54, 0x74, 0x43, 0x08, + 0xb7, 0x9d, 0x80, 0xcd, 0x2f, 0xd1, 0xb1, 0x6a, 0x43, 0xa8, 0x4a, 0x04, 0x8e, 0x69, 0x8c, 0x0d, + 0xa1, 0xd8, 0x6d, 0x43, 0x40, 0x1f, 0x86, 0xf1, 0xb8, 0xb2, 0xb0, 0xe5, 0xd4, 0x64, 0x07, 0xb2, + 0x25, 0x53, 0x35, 0x51, 0x38, 0x49, 0x6b, 0xff, 0x2d, 0x4b, 0x4c, 0x1e, 0xfa, 0xd5, 0xef, 0xf0, + 0x6f, 0xb5, 0x7f, 0xc9, 0x82, 0xc1, 0x79, 0xd7, 0xab, 0xbb, 0xde, 0x16, 0xfa, 0x14, 0x0c, 0xd1, + 0xb3, 0xa9, 0xee, 0x44, 0x8e, 0xd8, 0xf7, 0xde, 0xa7, 0xad, 0x2d, 0x75, 0x54, 0xcc, 0xb6, 0x76, + 0xb6, 0x28, 0x20, 0x9c, 0xa5, 0xd4, 0x74, 0xb5, 0x5d, 0xdf, 0xf8, 0x34, 0xa9, 0x45, 0xab, 0x24, + 0x72, 0xe2, 0xcf, 0x89, 0x61, 0x58, 0x71, 0x45, 0x57, 0x61, 0x20, 0x72, 0x82, 0x2d, 0x12, 0x89, + 0x0d, 0x30, 0x73, 0xa3, 0xe2, 0x25, 0x31, 0x5d, 0x91, 0xc4, 0xab, 0x91, 0xf8, 0x58, 0x58, 0x67, + 0x45, 0xb1, 0x60, 0x61, 0xff, 0x9f, 0x41, 0x38, 0xbd, 0x50, 0x5d, 0xc9, 0x99, 0x57, 0xe7, 0x61, + 0xa0, 0x1e, 0xb8, 0xbb, 0x24, 0x10, 0xfd, 0xac, 0xb8, 0x2c, 0x32, 0x28, 0x16, 0x58, 0xf4, 0x12, + 0x8c, 0xf0, 0x03, 0xe9, 0x8a, 0xe3, 0xd5, 0x1b, 0xb2, 0x8b, 0x4f, 0x08, 0xea, 0x91, 0x9b, 0x1a, + 0x0e, 0x1b, 0x94, 0x87, 0x9c, 0x54, 0xe7, 0x13, 0x8b, 0x31, 0xef, 0xb0, 0xfb, 0xbc, 0x05, 0x13, + 0xbc, 0x9a, 0xb9, 0x28, 0x0a, 0xdc, 0x8d, 0x76, 0x44, 0xc2, 0xe9, 0x7e, 0xb6, 0xd3, 0x2d, 0x64, + 0xf5, 0x56, 0x6e, 0x0f, 0xcc, 0xde, 0x4c, 0x70, 0xe1, 0x9b, 0xe0, 0xb4, 0xa8, 0x77, 0x22, 0x89, + 0xc6, 0xa9, 0x6a, 0xd1, 0xf7, 0x5a, 0x30, 0x53, 0xf3, 0xbd, 0x28, 0xf0, 0x1b, 0x0d, 0x12, 0x54, + 0xda, 0x1b, 0x0d, 0x37, 0xdc, 0xe6, 0xf3, 0x14, 0x93, 0x4d, 0xb6, 0x13, 0xe4, 0x8c, 0xa1, 0x22, + 0x12, 0x63, 0x78, 0xf6, 0xde, 0x7e, 0x79, 0x66, 0x21, 0x97, 0x15, 0xee, 0x50, 0x0d, 0xda, 0x01, + 0x44, 0x8f, 0xd2, 0x6a, 0xe4, 0x6c, 0x91, 0xb8, 0xf2, 0xc1, 0xde, 0x2b, 0x3f, 0x75, 0x6f, 0xbf, + 0x8c, 0xd6, 0x52, 0x2c, 0x70, 0x06, 0x5b, 0xf4, 0x16, 0x9c, 0xa0, 0xd0, 0xd4, 0xb7, 0x0e, 0xf5, + 0x5e, 0xdd, 0xf4, 0xbd, 0xfd, 0xf2, 0x89, 0xb5, 0x0c, 0x26, 0x38, 0x93, 0x35, 0xfa, 0x6e, 0x0b, + 0x4e, 0xc7, 0x9f, 0xbf, 0x74, 0xa7, 0xe5, 0x78, 0xf5, 0xb8, 0xe2, 0x52, 0xef, 0x15, 0xd3, 0x3d, + 0xf9, 0xf4, 0x42, 0x1e, 0x27, 0x9c, 0x5f, 0x09, 0xf2, 0x60, 0x8a, 0x36, 0x2d, 0x59, 0x37, 0xf4, + 0x5e, 0xf7, 0x43, 0xf7, 0xf6, 0xcb, 0x53, 0x6b, 0x69, 0x1e, 0x38, 0x8b, 0xf1, 0xcc, 0x02, 0x9c, + 0xcc, 0x9c, 0x9d, 0x68, 0x02, 0x8a, 0x3b, 0x84, 0x4b, 0x5d, 0x25, 0x4c, 0x7f, 0xa2, 0x13, 0xd0, + 0xbf, 0xeb, 0x34, 0xda, 0x62, 0x61, 0x62, 0xfe, 0xe7, 0xe5, 0xc2, 0x4b, 0x96, 0xfd, 0xcf, 0x8b, + 0x30, 0xbe, 0x50, 0x5d, 0xb9, 0xaf, 0x55, 0xaf, 0x1f, 0x7b, 0x85, 0x8e, 0xc7, 0x5e, 0x7c, 0x88, + 0x16, 0x73, 0x0f, 0xd1, 0xff, 0x3f, 0x63, 0xc9, 0xf6, 0xb1, 0x25, 0xfb, 0xc1, 0x9c, 0x25, 0x7b, + 0xc4, 0x0b, 0x75, 0x37, 0x67, 0xd6, 0xf6, 0xb3, 0x01, 0xcc, 0x94, 0x90, 0xae, 0xf9, 0x35, 0xa7, + 0x91, 0xdc, 0x6a, 0x0f, 0x39, 0x75, 0x8f, 0x66, 0x1c, 0x6b, 0x30, 0xb2, 0xe0, 0xb4, 
0x9c, 0x0d, + 0xb7, 0xe1, 0x46, 0x2e, 0x09, 0xd1, 0x93, 0x50, 0x74, 0xea, 0x75, 0x26, 0xdd, 0x95, 0xe6, 0x4f, + 0xde, 0xdb, 0x2f, 0x17, 0xe7, 0xea, 0x54, 0xcc, 0x00, 0x45, 0xb5, 0x87, 0x29, 0x05, 0x7a, 0x2f, + 0xf4, 0xd5, 0x03, 0xbf, 0x35, 0x5d, 0x60, 0x94, 0x74, 0x95, 0xf7, 0x2d, 0x06, 0x7e, 0x2b, 0x41, + 0xca, 0x68, 0xec, 0xdf, 0x28, 0xc0, 0x23, 0x0b, 0xa4, 0xb5, 0xbd, 0x5c, 0xcd, 0x39, 0x2f, 0x2e, + 0xc0, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0xab, 0x02, 0x86, 0x15, + 0x16, 0x9d, 0x83, 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, 0x43, 0x29, + 0xda, 0x21, 0x09, 0xc4, 0x8c, 0x51, 0x14, 0x37, 0x42, 0x12, 0x60, 0x86, 0x89, 0x25, 0x01, 0x2a, + 0x23, 0x88, 0x13, 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, 0x62, 0x64, + 0x7b, 0x5a, 0x9a, 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, 0xae, 0xa2, + 0xc2, 0xd7, 0x0a, 0x80, 0x78, 0x17, 0x7e, 0x9b, 0x75, 0xdc, 0x8d, 0x74, 0xc7, 0xf5, 0xbe, 0x24, + 0x8e, 0xaa, 0xf7, 0xfe, 0x87, 0x05, 0x8f, 0x2c, 0xb8, 0x5e, 0x9d, 0x04, 0x39, 0x13, 0xf0, 0xc1, + 0xdc, 0x9d, 0x0f, 0x27, 0xa4, 0x18, 0x53, 0xac, 0xef, 0x08, 0xa6, 0x98, 0xfd, 0x27, 0x16, 0x20, + 0xfe, 0xd9, 0xef, 0xb8, 0x8f, 0xbd, 0x91, 0xfe, 0xd8, 0x23, 0x98, 0x16, 0xf6, 0xdf, 0xb3, 0x60, + 0x78, 0xa1, 0xe1, 0xb8, 0x4d, 0xf1, 0xa9, 0x0b, 0x30, 0x29, 0x15, 0x45, 0x0c, 0xac, 0xc9, 0xfe, + 0x74, 0x73, 0x9b, 0xc4, 0x49, 0x24, 0x4e, 0xd3, 0xa3, 0x8f, 0xc3, 0x69, 0x03, 0xb8, 0x4e, 0x9a, + 0xad, 0x86, 0x13, 0xe9, 0xb7, 0x02, 0x76, 0xfa, 0xe3, 0x3c, 0x22, 0x9c, 0x5f, 0xde, 0xbe, 0x06, + 0x63, 0x0b, 0x0d, 0x97, 0x78, 0xd1, 0x4a, 0x65, 0xc1, 0xf7, 0x36, 0xdd, 0x2d, 0xf4, 0x32, 0x8c, + 0x45, 0x6e, 0x93, 0xf8, 0xed, 0xa8, 0x4a, 0x6a, 0xbe, 0xc7, 0xee, 0xda, 0xd6, 0x85, 0xfe, 0x79, + 0x74, 0x6f, 0xbf, 0x3c, 0xb6, 0x6e, 0x60, 0x70, 0x82, 0xd2, 0xfe, 0x7d, 0x3a, 0xe2, 0x7e, 0xb3, + 0xe5, 0x7b, 0xc4, 0x8b, 0x16, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0x2f, 0x43, 0x5f, 0x44, 0x47, 0x90, + 0x7f, 0xf9, 0x79, 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0xec, 0x97, 0x4f, 0xa5, 0x4b, 0xb0, 0x91, 0x65, + 0x65, 0xd0, 0x07, 0x61, 0x20, 0x8c, 0x9c, 0xa8, 0x1d, 0x8a, 0x4f, 0x7d, 0x54, 0x8e, 0x7f, 0x95, + 0x41, 0x0f, 0xf6, 0xcb, 0xe3, 0xaa, 0x18, 0x07, 0x61, 0x51, 0x00, 0x3d, 0x05, 0x83, 0x4d, 0x12, + 0x86, 0xce, 0x96, 0x3c, 0xbf, 0xc7, 0x45, 0xd9, 0xc1, 0x55, 0x0e, 0xc6, 0x12, 0x8f, 0x1e, 0x83, + 0x7e, 0x12, 0x04, 0x7e, 0x20, 0x76, 0x95, 0x51, 0x41, 0xd8, 0xbf, 0x44, 0x81, 0x98, 0xe3, 0xec, + 0x7f, 0x63, 0xc1, 0xb8, 0x6a, 0x2b, 0xaf, 0xeb, 0x18, 0xee, 0x4d, 0x6f, 0x00, 0xd4, 0xe4, 0x07, + 0x86, 0xec, 0xbc, 0x1b, 0x7e, 0xee, 0x7c, 0xa6, 0x68, 0x91, 0xea, 0xc6, 0x98, 0xb3, 0x02, 0x85, + 0x58, 0xe3, 0x66, 0xff, 0xaa, 0x05, 0x53, 0x89, 0x2f, 0xba, 0xe6, 0x86, 0x11, 0x7a, 0x33, 0xf5, + 0x55, 0xb3, 0xbd, 0x7d, 0x15, 0x2d, 0xcd, 0xbe, 0x49, 0x2d, 0x3e, 0x09, 0xd1, 0xbe, 0xe8, 0x0a, + 0xf4, 0xbb, 0x11, 0x69, 0xca, 0x8f, 0x79, 0xac, 0xe3, 0xc7, 0xf0, 0x56, 0xc5, 0x23, 0xb2, 0x42, + 0x4b, 0x62, 0xce, 0xc0, 0xfe, 0x8d, 0x22, 0x94, 0xf8, 0xb4, 0x5d, 0x75, 0x5a, 0xc7, 0x30, 0x16, + 0x4f, 0x43, 0xc9, 0x6d, 0x36, 0xdb, 0x91, 0xb3, 0x21, 0x0e, 0xa0, 0x21, 0xbe, 0x19, 0xac, 0x48, + 0x20, 0x8e, 0xf1, 0x68, 0x05, 0xfa, 0x58, 0x53, 0xf8, 0x57, 0x3e, 0x99, 0xfd, 0x95, 0xa2, 0xed, + 0xb3, 0x8b, 0x4e, 0xe4, 0x70, 0xd9, 0x4f, 0x9d, 0x7c, 0x14, 0x84, 0x19, 0x0b, 0xe4, 0x00, 0x6c, + 0xb8, 0x9e, 0x13, 0xec, 0x51, 0xd8, 0x74, 0x91, 0x31, 0x7c, 0xb6, 0x33, 0xc3, 0x79, 0x45, 0xcf, + 0xd9, 0xaa, 0x0f, 0x8b, 0x11, 0x58, 0x63, 0x3a, 0xf3, 0x01, 0x28, 0x29, 0xe2, 0xc3, 0x88, 0x70, + 0x33, 0x1f, 
0x86, 0xf1, 0x44, 0x5d, 0xdd, 0x8a, 0x8f, 0xe8, 0x12, 0xe0, 0x2f, 0xb3, 0x2d, 0x43, + 0xb4, 0x7a, 0xc9, 0xdb, 0x15, 0x3b, 0xe7, 0x5d, 0x38, 0xd1, 0xc8, 0xd8, 0x7b, 0xc5, 0xb8, 0xf6, + 0xbe, 0x57, 0x3f, 0x22, 0x3e, 0xfb, 0x44, 0x16, 0x16, 0x67, 0xd6, 0x41, 0xa5, 0x1a, 0xbf, 0x45, + 0x17, 0x88, 0xd3, 0xd0, 0x2f, 0x08, 0xd7, 0x05, 0x0c, 0x2b, 0x2c, 0xdd, 0xef, 0x4e, 0xa8, 0xc6, + 0x5f, 0x25, 0x7b, 0x55, 0xd2, 0x20, 0xb5, 0xc8, 0x0f, 0xbe, 0xa5, 0xcd, 0x3f, 0xc3, 0x7b, 0x9f, + 0x6f, 0x97, 0xc3, 0x82, 0x41, 0xf1, 0x2a, 0xd9, 0xe3, 0x43, 0xa1, 0x7f, 0x5d, 0xb1, 0xe3, 0xd7, + 0x7d, 0xc5, 0x82, 0x51, 0xf5, 0x75, 0xc7, 0xb0, 0x2f, 0xcc, 0x9b, 0xfb, 0xc2, 0x99, 0x8e, 0x13, + 0x3c, 0x67, 0x47, 0xf8, 0x5a, 0x01, 0x4e, 0x2b, 0x1a, 0x7a, 0x9b, 0xe1, 0x7f, 0xc4, 0xac, 0xba, + 0x08, 0x25, 0x4f, 0xe9, 0xf5, 0x2c, 0x53, 0xa1, 0x16, 0x6b, 0xf5, 0x62, 0x1a, 0x2a, 0x94, 0x7a, + 0xf1, 0x31, 0x3b, 0xa2, 0x2b, 0xbc, 0x85, 0x72, 0x7b, 0x1e, 0x8a, 0x6d, 0xb7, 0x2e, 0x0e, 0x98, + 0xf7, 0xc9, 0xde, 0xbe, 0xb1, 0xb2, 0x78, 0xb0, 0x5f, 0x7e, 0x34, 0xcf, 0xd8, 0x42, 0x4f, 0xb6, + 0x70, 0xf6, 0xc6, 0xca, 0x22, 0xa6, 0x85, 0xd1, 0x1c, 0x8c, 0xcb, 0x13, 0xfa, 0x26, 0x15, 0x10, + 0x7d, 0x4f, 0x9c, 0x43, 0x4a, 0x6b, 0x8d, 0x4d, 0x34, 0x4e, 0xd2, 0xa3, 0x45, 0x98, 0xd8, 0x69, + 0x6f, 0x90, 0x06, 0x89, 0xf8, 0x07, 0x5f, 0x25, 0x5c, 0xa7, 0x5b, 0x8a, 0xef, 0x92, 0x57, 0x13, + 0x78, 0x9c, 0x2a, 0x61, 0xff, 0x39, 0x3b, 0x0f, 0x44, 0xef, 0x55, 0x02, 0x9f, 0x4e, 0x2c, 0xca, + 0xfd, 0x5b, 0x39, 0x9d, 0x7b, 0x99, 0x15, 0x57, 0xc9, 0xde, 0xba, 0x4f, 0xef, 0x12, 0xd9, 0xb3, + 0xc2, 0x98, 0xf3, 0x7d, 0x1d, 0xe7, 0xfc, 0xcf, 0x17, 0xe0, 0xa4, 0xea, 0x01, 0x43, 0x6c, 0xfd, + 0x76, 0xef, 0x83, 0x4b, 0x30, 0x5c, 0x27, 0x9b, 0x4e, 0xbb, 0x11, 0x29, 0x03, 0x43, 0x3f, 0x37, + 0x32, 0x2d, 0xc6, 0x60, 0xac, 0xd3, 0x1c, 0xa2, 0xdb, 0xfe, 0xfd, 0x08, 0x3b, 0x88, 0x23, 0x87, + 0xce, 0x71, 0xb5, 0x6a, 0xac, 0xdc, 0x55, 0xf3, 0x18, 0xf4, 0xbb, 0x4d, 0x2a, 0x98, 0x15, 0x4c, + 0x79, 0x6b, 0x85, 0x02, 0x31, 0xc7, 0xa1, 0x27, 0x60, 0xb0, 0xe6, 0x37, 0x9b, 0x8e, 0x57, 0x67, + 0x47, 0x5e, 0x69, 0x7e, 0x98, 0xca, 0x6e, 0x0b, 0x1c, 0x84, 0x25, 0x0e, 0x3d, 0x02, 0x7d, 0x4e, + 0xb0, 0xc5, 0xb5, 0x2e, 0xa5, 0xf9, 0x21, 0x5a, 0xd3, 0x5c, 0xb0, 0x15, 0x62, 0x06, 0xa5, 0x97, + 0xc6, 0xdb, 0x7e, 0xb0, 0xe3, 0x7a, 0x5b, 0x8b, 0x6e, 0x20, 0x96, 0x84, 0x3a, 0x0b, 0x6f, 0x29, + 0x0c, 0xd6, 0xa8, 0xd0, 0x32, 0xf4, 0xb7, 0xfc, 0x20, 0x0a, 0xa7, 0x07, 0x58, 0x77, 0x3f, 0x9a, + 0xb3, 0x11, 0xf1, 0xaf, 0xad, 0xf8, 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1c, 0x5d, + 0x83, 0x41, 0xe2, 0xed, 0x2e, 0x07, 0x7e, 0x73, 0x7a, 0x2a, 0x9f, 0xd3, 0x12, 0x27, 0xe1, 0xd3, + 0x2c, 0x96, 0x51, 0x05, 0x18, 0x4b, 0x16, 0xe8, 0x83, 0x50, 0x24, 0xde, 0xee, 0xf4, 0x20, 0xe3, + 0x34, 0x93, 0xc3, 0xe9, 0xa6, 0x13, 0xc4, 0x7b, 0xfe, 0x92, 0xb7, 0x8b, 0x69, 0x19, 0xf4, 0x31, + 0x28, 0xc9, 0x0d, 0x23, 0x14, 0xea, 0xcc, 0xcc, 0x09, 0x2b, 0xb7, 0x19, 0x4c, 0xde, 0x6a, 0xbb, + 0x01, 0x69, 0x12, 0x2f, 0x0a, 0xe3, 0x1d, 0x52, 0x62, 0x43, 0x1c, 0x73, 0x43, 0x35, 0x18, 0x09, + 0x48, 0xe8, 0xde, 0x25, 0x15, 0xbf, 0xe1, 0xd6, 0xf6, 0xa6, 0x1f, 0x62, 0xcd, 0x7b, 0xaa, 0x63, + 0x97, 0x61, 0xad, 0x40, 0xac, 0x6e, 0xd7, 0xa1, 0xd8, 0x60, 0x8a, 0x3e, 0x26, 0x15, 0xf5, 0xab, + 0x7e, 0xdb, 0x8b, 0xc2, 0xe9, 0x12, 0xab, 0x24, 0xd3, 0x84, 0x7a, 0x33, 0xa6, 0x4b, 0x6a, 0xf2, + 0x79, 0x61, 0x6c, 0xb0, 0x42, 0x9f, 0x80, 0x51, 0xfe, 0x9f, 0x1b, 0x22, 0xc3, 0xe9, 0x93, 0x8c, + 0xf7, 0xb9, 0x7c, 0xde, 0x9c, 0x70, 0xfe, 0xa4, 0x60, 0x3e, 0xaa, 0x43, 0x43, 0x6c, 0x72, 0x43, + 0x18, 0x46, 0x1b, 0xee, 0x2e, 0xf1, 
0x48, 0x18, 0x56, 0x02, 0x7f, 0x83, 0x08, 0xbd, 0xea, 0xe9, + 0x6c, 0xc3, 0xa5, 0xbf, 0x41, 0xe6, 0x27, 0x29, 0xcf, 0x6b, 0x7a, 0x19, 0x6c, 0xb2, 0x40, 0x37, + 0x60, 0x8c, 0x5e, 0x64, 0xdd, 0x98, 0xe9, 0x70, 0x37, 0xa6, 0xec, 0xf2, 0x86, 0x8d, 0x42, 0x38, + 0xc1, 0x04, 0x5d, 0x87, 0x91, 0x30, 0x72, 0x82, 0xa8, 0xdd, 0xe2, 0x4c, 0x4f, 0x75, 0x63, 0xca, + 0xec, 0xde, 0x55, 0xad, 0x08, 0x36, 0x18, 0xa0, 0xd7, 0xa0, 0xd4, 0x70, 0x37, 0x49, 0x6d, 0xaf, + 0xd6, 0x20, 0xd3, 0x23, 0x8c, 0x5b, 0xe6, 0xce, 0x75, 0x4d, 0x12, 0x71, 0x61, 0x5a, 0xfd, 0xc5, + 0x71, 0x71, 0x74, 0x13, 0x4e, 0x45, 0x24, 0x68, 0xba, 0x9e, 0x43, 0x77, 0x1c, 0x71, 0x7f, 0x63, + 0xf6, 0xe4, 0x51, 0xb6, 0xa4, 0xcf, 0x8a, 0xd1, 0x38, 0xb5, 0x9e, 0x49, 0x85, 0x73, 0x4a, 0xa3, + 0x3b, 0x30, 0x9d, 0x81, 0xe1, 0x53, 0xf9, 0x04, 0xe3, 0xfc, 0x21, 0xc1, 0x79, 0x7a, 0x3d, 0x87, + 0xee, 0xa0, 0x03, 0x0e, 0xe7, 0x72, 0x47, 0xd7, 0x61, 0x9c, 0x6d, 0x73, 0x95, 0x76, 0xa3, 0x21, + 0x2a, 0x1c, 0x63, 0x15, 0x3e, 0x21, 0x0f, 0xfd, 0x15, 0x13, 0x7d, 0xb0, 0x5f, 0x86, 0xf8, 0x1f, + 0x4e, 0x96, 0x46, 0x1b, 0xcc, 0x74, 0xd9, 0x0e, 0xdc, 0x68, 0x8f, 0xae, 0x34, 0x72, 0x27, 0x9a, + 0x1e, 0xef, 0xa8, 0xc6, 0xd1, 0x49, 0x95, 0x7d, 0x53, 0x07, 0xe2, 0x24, 0x43, 0xba, 0x6f, 0x87, + 0x51, 0xdd, 0xf5, 0xa6, 0x27, 0xf8, 0xe5, 0x47, 0x6e, 0x7b, 0x55, 0x0a, 0xc4, 0x1c, 0xc7, 0xcc, + 0x96, 0xf4, 0xc7, 0x75, 0x7a, 0x3c, 0x4e, 0x32, 0xc2, 0xd8, 0x6c, 0x29, 0x11, 0x38, 0xa6, 0xa1, + 0x12, 0x6b, 0x14, 0xed, 0x4d, 0x23, 0x46, 0xaa, 0x76, 0xaf, 0xf5, 0xf5, 0x8f, 0x61, 0x0a, 0xb7, + 0x37, 0x60, 0x4c, 0x6d, 0x1d, 0xac, 0x4f, 0x50, 0x19, 0xfa, 0x99, 0x8c, 0x26, 0x94, 0x8e, 0x25, + 0xda, 0x04, 0x26, 0xbf, 0x61, 0x0e, 0x67, 0x4d, 0x70, 0xef, 0x92, 0xf9, 0xbd, 0x88, 0x70, 0xc5, + 0x41, 0x51, 0x6b, 0x82, 0x44, 0xe0, 0x98, 0xc6, 0xfe, 0xbf, 0x5c, 0xd6, 0x8d, 0xb7, 0xf4, 0x1e, + 0x0e, 0xb1, 0x67, 0x60, 0x68, 0xdb, 0x0f, 0x23, 0x4a, 0xcd, 0xea, 0xe8, 0x8f, 0xa5, 0xdb, 0x2b, + 0x02, 0x8e, 0x15, 0x05, 0x7a, 0x05, 0x46, 0x6b, 0x7a, 0x05, 0xe2, 0x04, 0x56, 0xdb, 0x88, 0x51, + 0x3b, 0x36, 0x69, 0xd1, 0x4b, 0x30, 0xc4, 0x5c, 0x71, 0x6a, 0x7e, 0x43, 0x88, 0x86, 0x52, 0x8c, + 0x18, 0xaa, 0x08, 0xf8, 0x81, 0xf6, 0x1b, 0x2b, 0x6a, 0x74, 0x1e, 0x06, 0x68, 0x13, 0x56, 0x2a, + 0xe2, 0xec, 0x53, 0xfa, 0xb3, 0x2b, 0x0c, 0x8a, 0x05, 0xd6, 0xfe, 0x55, 0x8b, 0x09, 0x3e, 0xe9, + 0x0d, 0x1a, 0x5d, 0x61, 0x3b, 0x3c, 0xdb, 0xee, 0x35, 0xfd, 0xd5, 0xe3, 0xda, 0xb6, 0xad, 0x70, + 0x07, 0x89, 0xff, 0xd8, 0x28, 0x89, 0xde, 0x80, 0xd1, 0x80, 0xb0, 0x2d, 0x42, 0x4c, 0x78, 0x7e, + 0xfa, 0xbf, 0x20, 0xbb, 0x00, 0xeb, 0xc8, 0x83, 0xfd, 0xf2, 0xc3, 0xf1, 0x79, 0x44, 0xdb, 0x63, + 0xa0, 0xb1, 0xc9, 0xca, 0xfe, 0xcb, 0x05, 0x6d, 0x96, 0x54, 0x23, 0x27, 0x22, 0xa8, 0x02, 0x83, + 0xb7, 0x1d, 0x37, 0x72, 0xbd, 0x2d, 0x21, 0xa4, 0x75, 0x3e, 0x95, 0x58, 0xa1, 0x5b, 0xbc, 0x00, + 0x17, 0x35, 0xc4, 0x1f, 0x2c, 0xd9, 0x50, 0x8e, 0x41, 0xdb, 0xf3, 0x28, 0xc7, 0x42, 0xaf, 0x1c, + 0x31, 0x2f, 0xc0, 0x39, 0x8a, 0x3f, 0x58, 0xb2, 0x41, 0x6f, 0x02, 0xc8, 0x1d, 0x82, 0xd4, 0x85, + 0x0b, 0xcf, 0x33, 0xdd, 0x99, 0xae, 0xab, 0x32, 0xf3, 0x63, 0x54, 0x90, 0x89, 0xff, 0x63, 0x8d, + 0x9f, 0x1d, 0x69, 0x63, 0xaa, 0x37, 0x06, 0x7d, 0x9c, 0x2e, 0x51, 0x27, 0x88, 0x48, 0x7d, 0x2e, + 0x12, 0x9d, 0xf3, 0xde, 0xde, 0x6e, 0x72, 0xeb, 0x6e, 0x93, 0xe8, 0xcb, 0x59, 0x30, 0xc1, 0x31, + 0x3f, 0xfb, 0x17, 0x8b, 0x30, 0x9d, 0xd7, 0x5c, 0xba, 0x68, 0xc8, 0x1d, 0x37, 0x5a, 0xa0, 0x32, + 0xa8, 0x65, 0x2e, 0x9a, 0x25, 0x01, 0xc7, 0x8a, 0x82, 0xce, 0xde, 0xd0, 0xdd, 0x92, 0x17, 0xf1, + 0xfe, 0x78, 0xf6, 0x56, 0x19, 0x14, 0x0b, 0x2c, 0xa5, 0x0b, 
0x88, 0x13, 0x0a, 0x1f, 0x31, 0x6d, + 0x96, 0x63, 0x06, 0xc5, 0x02, 0xab, 0xab, 0x04, 0xfb, 0xba, 0xa8, 0x04, 0x8d, 0x2e, 0xea, 0x3f, + 0xda, 0x2e, 0x42, 0x9f, 0x04, 0xd8, 0x74, 0x3d, 0x37, 0xdc, 0x66, 0xdc, 0x07, 0x0e, 0xcd, 0x5d, + 0x49, 0xb0, 0xcb, 0x8a, 0x0b, 0xd6, 0x38, 0xa2, 0x17, 0x61, 0x58, 0x6d, 0x20, 0x2b, 0x8b, 0xcc, + 0x60, 0xae, 0x39, 0x20, 0xc5, 0xbb, 0xe9, 0x22, 0xd6, 0xe9, 0xec, 0x4f, 0x27, 0xe7, 0x8b, 0x58, + 0x01, 0x5a, 0xff, 0x5a, 0xbd, 0xf6, 0x6f, 0xa1, 0x73, 0xff, 0xda, 0xdf, 0x18, 0x80, 0x71, 0xa3, + 0xb2, 0x76, 0xd8, 0xc3, 0x9e, 0x7b, 0x99, 0x1e, 0x40, 0x4e, 0x44, 0xc4, 0xfa, 0xb3, 0xbb, 0x2f, + 0x15, 0xfd, 0x90, 0xa2, 0x2b, 0x80, 0x97, 0x47, 0x9f, 0x84, 0x52, 0xc3, 0x09, 0x99, 0x7a, 0x91, + 0x88, 0x75, 0xd7, 0x0b, 0xb3, 0xf8, 0xf6, 0xe6, 0x84, 0x91, 0x76, 0xea, 0x73, 0xde, 0x31, 0x4b, + 0x7a, 0x52, 0x52, 0xf9, 0x4a, 0x3a, 0x21, 0xaa, 0x46, 0x50, 0x21, 0x6c, 0x0f, 0x73, 0x1c, 0x7a, + 0x89, 0x6d, 0xad, 0x74, 0x56, 0x2c, 0x50, 0x69, 0x94, 0x4d, 0xb3, 0x7e, 0x43, 0x22, 0x56, 0x38, + 0x6c, 0x50, 0xc6, 0x17, 0xa8, 0x81, 0x0e, 0x17, 0xa8, 0xa7, 0x60, 0x90, 0xfd, 0x50, 0x33, 0x40, + 0x8d, 0xc6, 0x0a, 0x07, 0x63, 0x89, 0x4f, 0x4e, 0x98, 0xa1, 0xde, 0x26, 0x0c, 0xbd, 0xa2, 0x89, + 0x49, 0xcd, 0x9c, 0x15, 0x86, 0xf8, 0x2e, 0x27, 0xa6, 0x3c, 0x96, 0x38, 0xf4, 0xb3, 0x16, 0x20, + 0xa7, 0x41, 0xaf, 0xb6, 0x14, 0xac, 0x6e, 0x22, 0xc0, 0x44, 0xed, 0x57, 0xba, 0x76, 0x7b, 0x3b, + 0x9c, 0x9d, 0x4b, 0x95, 0xe6, 0x6a, 0xcd, 0x97, 0x45, 0x13, 0x51, 0x9a, 0x40, 0x3f, 0x8c, 0xae, + 0xb9, 0x61, 0xf4, 0xb9, 0xff, 0x98, 0x38, 0x9c, 0x32, 0x9a, 0x84, 0x6e, 0xe8, 0x37, 0xa5, 0xe1, + 0x43, 0xde, 0x94, 0x46, 0xf3, 0x6e, 0x49, 0x33, 0x6d, 0x78, 0x28, 0xe7, 0x0b, 0x32, 0x94, 0xa5, + 0x8b, 0xba, 0xb2, 0xb4, 0x8b, 0x8a, 0x6d, 0x56, 0xd6, 0x31, 0xfb, 0x7a, 0xdb, 0xf1, 0x22, 0x37, + 0xda, 0xd3, 0x95, 0xab, 0xef, 0x85, 0xb1, 0x45, 0x87, 0x34, 0x7d, 0x6f, 0xc9, 0xab, 0xb7, 0x7c, + 0xd7, 0x8b, 0xd0, 0x34, 0xf4, 0x31, 0xe1, 0x83, 0x6f, 0xbd, 0x7d, 0xb4, 0xf7, 0x30, 0x83, 0xd8, + 0x5b, 0x70, 0x72, 0xd1, 0xbf, 0xed, 0xdd, 0x76, 0x82, 0xfa, 0x5c, 0x65, 0x45, 0x53, 0xfe, 0xac, + 0x49, 0xe5, 0x83, 0x95, 0x7f, 0xb5, 0xd3, 0x4a, 0xf2, 0xeb, 0xd0, 0xb2, 0xdb, 0x20, 0x39, 0x2a, + 0xba, 0xbf, 0x56, 0x30, 0x6a, 0x8a, 0xe9, 0x95, 0x91, 0xd8, 0xca, 0x35, 0x12, 0xbf, 0x0e, 0x43, + 0x9b, 0x2e, 0x69, 0xd4, 0x31, 0xd9, 0x14, 0xbd, 0xf3, 0x64, 0xbe, 0x1b, 0xd9, 0x32, 0xa5, 0x94, + 0x2a, 0x59, 0xae, 0xba, 0x58, 0x16, 0x85, 0xb1, 0x62, 0x83, 0x76, 0x60, 0x42, 0xf6, 0xa1, 0xc4, + 0x8a, 0xfd, 0xe0, 0xa9, 0x4e, 0x03, 0x6f, 0x32, 0x3f, 0x71, 0x6f, 0xbf, 0x3c, 0x81, 0x13, 0x6c, + 0x70, 0x8a, 0x31, 0x7a, 0x04, 0xfa, 0x9a, 0xf4, 0xe4, 0xeb, 0x63, 0xdd, 0xcf, 0x74, 0x15, 0x4c, + 0xed, 0xc2, 0xa0, 0xf6, 0x8f, 0x5b, 0xf0, 0x50, 0xaa, 0x67, 0x84, 0xfa, 0xe9, 0x88, 0x47, 0x21, + 0xa9, 0x0e, 0x2a, 0x74, 0x57, 0x07, 0xd9, 0x7f, 0xdb, 0x82, 0x13, 0x4b, 0xcd, 0x56, 0xb4, 0xb7, + 0xe8, 0x9a, 0x16, 0xdd, 0x0f, 0xc0, 0x40, 0x93, 0xd4, 0xdd, 0x76, 0x53, 0x8c, 0x5c, 0x59, 0x9e, + 0x0e, 0xab, 0x0c, 0x7a, 0xb0, 0x5f, 0x1e, 0xad, 0x46, 0x7e, 0xe0, 0x6c, 0x11, 0x0e, 0xc0, 0x82, + 0x9c, 0x9d, 0xb1, 0xee, 0x5d, 0x72, 0xcd, 0x6d, 0xba, 0xd1, 0xfd, 0xcd, 0x76, 0x61, 0x8c, 0x95, + 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0xba, 0x05, 0xe3, 0x72, 0xde, 0xcf, 0xd5, 0xeb, 0x01, 0x09, 0x43, + 0x34, 0x03, 0x05, 0xb7, 0x25, 0x5a, 0x09, 0xa2, 0x95, 0x85, 0x95, 0x0a, 0x2e, 0xb8, 0x2d, 0x29, + 0xce, 0xb3, 0x03, 0xa8, 0x68, 0xda, 0xa5, 0xaf, 0x08, 0x38, 0x56, 0x14, 0xe8, 0x02, 0x0c, 0x79, + 0x7e, 0x9d, 0x4b, 0xc4, 0x5c, 0x94, 0x60, 0x13, 0x6c, 0x4d, 0xc0, 0xb0, 0xc2, 0xa2, 
0x0a, 0x94, + 0xb8, 0xd7, 0x62, 0x3c, 0x69, 0x7b, 0xf2, 0x7d, 0x64, 0x5f, 0xb6, 0x2e, 0x4b, 0xe2, 0x98, 0x89, + 0xfd, 0xeb, 0x16, 0x8c, 0xc8, 0x2f, 0xeb, 0xf1, 0xae, 0x42, 0x97, 0x56, 0x7c, 0x4f, 0x89, 0x97, + 0x16, 0xbd, 0x6b, 0x30, 0x8c, 0x71, 0xc5, 0x28, 0x1e, 0xea, 0x8a, 0x71, 0x09, 0x86, 0x9d, 0x56, + 0xab, 0x62, 0xde, 0x4f, 0xd8, 0x54, 0x9a, 0x8b, 0xc1, 0x58, 0xa7, 0xb1, 0x7f, 0xac, 0x00, 0x63, + 0xf2, 0x0b, 0xaa, 0xed, 0x8d, 0x90, 0x44, 0x68, 0x1d, 0x4a, 0x0e, 0x1f, 0x25, 0x22, 0x27, 0xf9, + 0x63, 0xd9, 0x4a, 0x2e, 0x63, 0x48, 0x63, 0x41, 0x6b, 0x4e, 0x96, 0xc6, 0x31, 0x23, 0xd4, 0x80, + 0x49, 0xcf, 0x8f, 0xd8, 0xa1, 0xab, 0xf0, 0x9d, 0xec, 0x8e, 0x49, 0xee, 0xa7, 0x05, 0xf7, 0xc9, + 0xb5, 0x24, 0x17, 0x9c, 0x66, 0x8c, 0x96, 0xa4, 0xe2, 0xb0, 0x98, 0xaf, 0x44, 0xd2, 0x07, 0x2e, + 0x5b, 0x6f, 0x68, 0xff, 0x8a, 0x05, 0x25, 0x49, 0x76, 0x1c, 0x26, 0xe6, 0x55, 0x18, 0x0c, 0xd9, + 0x20, 0xc8, 0xae, 0xb1, 0x3b, 0x35, 0x9c, 0x8f, 0x57, 0x2c, 0x4b, 0xf0, 0xff, 0x21, 0x96, 0x3c, + 0x98, 0xdd, 0x48, 0x35, 0xff, 0x1d, 0x62, 0x37, 0x52, 0xed, 0xc9, 0x39, 0x94, 0xfe, 0x88, 0xb5, + 0x59, 0x53, 0xc4, 0x52, 0x91, 0xb7, 0x15, 0x90, 0x4d, 0xf7, 0x4e, 0x52, 0xe4, 0xad, 0x30, 0x28, + 0x16, 0x58, 0xf4, 0x26, 0x8c, 0xd4, 0xa4, 0xc1, 0x20, 0x5e, 0xe1, 0xe7, 0x3b, 0x1a, 0xaf, 0x94, + 0x9d, 0x93, 0xeb, 0xd0, 0x16, 0xb4, 0xf2, 0xd8, 0xe0, 0x66, 0x7a, 0xe5, 0x14, 0xbb, 0x79, 0xe5, + 0xc4, 0x7c, 0xf3, 0x7d, 0x54, 0x7e, 0xc2, 0x82, 0x01, 0xae, 0x28, 0xee, 0x4d, 0x4f, 0xaf, 0x99, + 0x7d, 0xe3, 0xbe, 0xbb, 0x49, 0x81, 0x42, 0xd2, 0x40, 0xab, 0x50, 0x62, 0x3f, 0x98, 0xa2, 0xbb, + 0x98, 0xff, 0x68, 0x86, 0xd7, 0xaa, 0x37, 0xf0, 0xa6, 0x2c, 0x86, 0x63, 0x0e, 0xf6, 0x8f, 0x16, + 0xe9, 0xee, 0x16, 0x93, 0x1a, 0x87, 0xbe, 0xf5, 0xe0, 0x0e, 0xfd, 0xc2, 0x83, 0x3a, 0xf4, 0xb7, + 0x60, 0xbc, 0xa6, 0x19, 0x89, 0xe3, 0x91, 0xbc, 0xd0, 0x71, 0x92, 0x68, 0xf6, 0x64, 0xae, 0x9d, + 0x5b, 0x30, 0x99, 0xe0, 0x24, 0x57, 0xf4, 0x71, 0x18, 0xe1, 0xe3, 0x2c, 0x6a, 0xe1, 0x8e, 0x4d, + 0x4f, 0xe4, 0xcf, 0x17, 0xbd, 0x0a, 0xae, 0xcd, 0xd5, 0x8a, 0x63, 0x83, 0x99, 0xfd, 0xa7, 0x16, + 0xa0, 0xa5, 0xd6, 0x36, 0x69, 0x92, 0xc0, 0x69, 0xc4, 0xb6, 0x9e, 0x1f, 0xb2, 0x60, 0x9a, 0xa4, + 0xc0, 0x0b, 0x7e, 0xb3, 0x29, 0x2e, 0x8b, 0x39, 0xfa, 0x8c, 0xa5, 0x9c, 0x32, 0xea, 0x55, 0xd1, + 0x74, 0x1e, 0x05, 0xce, 0xad, 0x0f, 0xad, 0xc2, 0x14, 0x3f, 0x25, 0x15, 0x42, 0x73, 0x92, 0x7a, + 0x58, 0x30, 0x9e, 0x5a, 0x4f, 0x93, 0xe0, 0xac, 0x72, 0xf6, 0x37, 0x47, 0x20, 0xb7, 0x15, 0xef, + 0x1a, 0xb9, 0xde, 0x35, 0x72, 0xbd, 0x6b, 0xe4, 0x7a, 0xd7, 0xc8, 0xf5, 0xae, 0x91, 0xeb, 0x5d, + 0x23, 0xd7, 0x51, 0x18, 0xb9, 0xfe, 0x8a, 0x05, 0x27, 0xd5, 0x59, 0x63, 0xdc, 0xae, 0x3f, 0x03, + 0x53, 0x7c, 0xb9, 0x19, 0xde, 0xbb, 0xe2, 0x6c, 0xbd, 0x94, 0x39, 0x73, 0x13, 0x5e, 0xe6, 0x46, + 0x41, 0xfe, 0x5c, 0x27, 0x03, 0x81, 0xb3, 0xaa, 0xb1, 0x7f, 0x71, 0x08, 0xfa, 0x97, 0x76, 0x89, + 0x17, 0x1d, 0xc3, 0x3d, 0xa4, 0x06, 0x63, 0xae, 0xb7, 0xeb, 0x37, 0x76, 0x49, 0x9d, 0xe3, 0x0f, + 0x73, 0x5d, 0x3e, 0x25, 0x58, 0x8f, 0xad, 0x18, 0x2c, 0x70, 0x82, 0xe5, 0x83, 0x30, 0x15, 0x5c, + 0x86, 0x01, 0x7e, 0x52, 0x08, 0x3b, 0x41, 0xe6, 0x9e, 0xcd, 0x3a, 0x51, 0x9c, 0x7f, 0xb1, 0x19, + 0x83, 0x9f, 0x44, 0xa2, 0x38, 0xfa, 0x34, 0x8c, 0x6d, 0xba, 0x41, 0x18, 0xad, 0xbb, 0x4d, 0x12, + 0x46, 0x4e, 0xb3, 0x75, 0x1f, 0xa6, 0x01, 0xd5, 0x0f, 0xcb, 0x06, 0x27, 0x9c, 0xe0, 0x8c, 0xb6, + 0x60, 0xb4, 0xe1, 0xe8, 0x55, 0x0d, 0x1e, 0xba, 0x2a, 0x75, 0x3a, 0x5c, 0xd3, 0x19, 0x61, 0x93, + 0x2f, 0x5d, 0x4e, 0x35, 0xa6, 0xdd, 0x1e, 0x62, 0xba, 0x07, 0xb5, 0x9c, 0xb8, 0x5a, 0x9b, 0xe3, + 0xa8, 0x34, 
0xc5, 0x5c, 0xc4, 0x4b, 0xa6, 0x34, 0xa5, 0x39, 0x82, 0x7f, 0x0a, 0x4a, 0x84, 0x76, + 0x21, 0x65, 0x2c, 0x0e, 0x98, 0x8b, 0xbd, 0xb5, 0x75, 0xd5, 0xad, 0x05, 0xbe, 0x69, 0x94, 0x59, + 0x92, 0x9c, 0x70, 0xcc, 0x14, 0x2d, 0xc0, 0x40, 0x48, 0x02, 0x57, 0x29, 0x7e, 0x3b, 0x0c, 0x23, + 0x23, 0xe3, 0xef, 0xc1, 0xf8, 0x6f, 0x2c, 0x8a, 0xd2, 0xe9, 0xe5, 0x30, 0xbd, 0x29, 0x3b, 0x0c, + 0xb4, 0xe9, 0x35, 0xc7, 0xa0, 0x58, 0x60, 0xd1, 0x6b, 0x30, 0x18, 0x90, 0x06, 0xb3, 0xfa, 0x8d, + 0xf6, 0x3e, 0xc9, 0xb9, 0x11, 0x91, 0x97, 0xc3, 0x92, 0x01, 0xba, 0x0a, 0x28, 0x20, 0x54, 0x1a, + 0x73, 0xbd, 0x2d, 0xe5, 0x38, 0x2d, 0x36, 0x5a, 0x25, 0xf5, 0xe2, 0x98, 0x42, 0x3e, 0x05, 0xc4, + 0x19, 0xc5, 0xd0, 0x65, 0x98, 0x54, 0xd0, 0x15, 0x2f, 0x8c, 0x1c, 0xba, 0xc1, 0x8d, 0x33, 0x5e, + 0x4a, 0x19, 0x82, 0x93, 0x04, 0x38, 0x5d, 0xc6, 0xfe, 0x92, 0x05, 0xbc, 0x9f, 0x8f, 0x41, 0x05, + 0xf0, 0xaa, 0xa9, 0x02, 0x38, 0x9d, 0x3b, 0x72, 0x39, 0xd7, 0xff, 0x2f, 0x59, 0x30, 0xac, 0x8d, + 0x6c, 0x3c, 0x67, 0xad, 0x0e, 0x73, 0xb6, 0x0d, 0x13, 0x74, 0xa6, 0x5f, 0xdf, 0x08, 0x49, 0xb0, + 0x4b, 0xea, 0x6c, 0x62, 0x16, 0xee, 0x6f, 0x62, 0x2a, 0x27, 0xcd, 0x6b, 0x09, 0x86, 0x38, 0x55, + 0x85, 0xfd, 0x29, 0xd9, 0x54, 0xe5, 0xd3, 0x5a, 0x53, 0x63, 0x9e, 0xf0, 0x69, 0x55, 0xa3, 0x8a, + 0x63, 0x1a, 0xba, 0xd4, 0xb6, 0xfd, 0x30, 0x4a, 0xfa, 0xb4, 0x5e, 0xf1, 0xc3, 0x08, 0x33, 0x8c, + 0xfd, 0x3c, 0xc0, 0xd2, 0x1d, 0x52, 0xe3, 0x33, 0x56, 0xbf, 0xa1, 0x58, 0xf9, 0x37, 0x14, 0xfb, + 0x77, 0x2c, 0x18, 0x5b, 0x5e, 0x30, 0x4e, 0xae, 0x59, 0x00, 0x7e, 0xad, 0xba, 0x75, 0x6b, 0x4d, + 0xfa, 0x6a, 0x70, 0x73, 0xb5, 0x82, 0x62, 0x8d, 0x02, 0x9d, 0x86, 0x62, 0xa3, 0xed, 0x09, 0x1d, + 0xe5, 0x20, 0x3d, 0x1e, 0xaf, 0xb5, 0x3d, 0x4c, 0x61, 0xda, 0x33, 0xa0, 0x62, 0xcf, 0xcf, 0x80, + 0xba, 0x86, 0xff, 0x40, 0x65, 0xe8, 0xbf, 0x7d, 0xdb, 0xad, 0xf3, 0x47, 0xd6, 0xc2, 0x8f, 0xe4, + 0xd6, 0xad, 0x95, 0xc5, 0x10, 0x73, 0xb8, 0xfd, 0x85, 0x22, 0xcc, 0x2c, 0x37, 0xc8, 0x9d, 0xb7, + 0xf9, 0xd0, 0xbc, 0xd7, 0x47, 0x4c, 0x87, 0xd3, 0xf6, 0x1c, 0xf6, 0xa1, 0x5a, 0xf7, 0xfe, 0xd8, + 0x84, 0x41, 0xee, 0xd2, 0x29, 0x9f, 0x9d, 0x67, 0xda, 0xe6, 0xf2, 0x3b, 0x64, 0x96, 0xbb, 0x86, + 0x0a, 0xdb, 0x9c, 0x3a, 0x30, 0x05, 0x14, 0x4b, 0xe6, 0x33, 0x2f, 0xc3, 0x88, 0x4e, 0x79, 0xa8, + 0x27, 0xa3, 0xdf, 0x53, 0x84, 0x09, 0xda, 0x82, 0x07, 0x3a, 0x10, 0x37, 0xd2, 0x03, 0x71, 0xd4, + 0xcf, 0x06, 0xbb, 0x8f, 0xc6, 0x9b, 0xc9, 0xd1, 0xb8, 0x94, 0x37, 0x1a, 0xc7, 0x3d, 0x06, 0xdf, + 0x6b, 0xc1, 0xd4, 0x72, 0xc3, 0xaf, 0xed, 0x24, 0x9e, 0xf6, 0xbd, 0x08, 0xc3, 0x74, 0x3b, 0x0e, + 0x8d, 0x28, 0x17, 0x46, 0xdc, 0x13, 0x81, 0xc2, 0x3a, 0x9d, 0x56, 0xec, 0xc6, 0x8d, 0x95, 0xc5, + 0xac, 0x70, 0x29, 0x02, 0x85, 0x75, 0x3a, 0xfb, 0xb7, 0x2c, 0x38, 0x73, 0x79, 0x61, 0x29, 0x9e, + 0x8a, 0xa9, 0x88, 0x2d, 0xe7, 0x61, 0xa0, 0x55, 0xd7, 0x9a, 0x12, 0xeb, 0x70, 0x17, 0x59, 0x2b, + 0x04, 0xf6, 0x9d, 0x12, 0x8d, 0xe8, 0x06, 0xc0, 0x65, 0x5c, 0x59, 0x10, 0xfb, 0xae, 0x34, 0xd9, + 0x58, 0xb9, 0x26, 0x9b, 0x27, 0x60, 0x90, 0x9e, 0x0b, 0x6e, 0x4d, 0xb6, 0x9b, 0x5b, 0xdf, 0x39, + 0x08, 0x4b, 0x9c, 0xfd, 0x73, 0x16, 0x4c, 0x5d, 0x76, 0x23, 0x7a, 0x68, 0x27, 0x43, 0x92, 0xd0, + 0x53, 0x3b, 0x74, 0x23, 0x3f, 0xd8, 0x4b, 0x86, 0x24, 0xc1, 0x0a, 0x83, 0x35, 0x2a, 0xfe, 0x41, + 0xbb, 0x2e, 0x7b, 0xa3, 0x50, 0x30, 0x8d, 0x64, 0x58, 0xc0, 0xb1, 0xa2, 0xa0, 0xfd, 0x55, 0x77, + 0x03, 0xa6, 0x5f, 0xdc, 0x13, 0x1b, 0xb7, 0xea, 0xaf, 0x45, 0x89, 0xc0, 0x31, 0x8d, 0xfd, 0xc7, + 0x16, 0x94, 0x2f, 0x37, 0xda, 0x61, 0x44, 0x82, 0xcd, 0x30, 0x67, 0xd3, 0x7d, 0x1e, 0x4a, 0x44, + 0x6a, 0xf3, 0xe5, 0x63, 0x4a, 0x29, 
0x88, 0x2a, 0x35, 0x3f, 0x8f, 0x8c, 0xa2, 0xe8, 0x7a, 0x78, + 0x7f, 0x7c, 0xb8, 0x07, 0xa4, 0xcb, 0x80, 0x88, 0x5e, 0x97, 0x1e, 0x2a, 0x86, 0xc5, 0x9c, 0x58, + 0x4a, 0x61, 0x71, 0x46, 0x09, 0xfb, 0xc7, 0x2d, 0x38, 0xa9, 0x3e, 0xf8, 0x1d, 0xf7, 0x99, 0xf6, + 0x57, 0x0b, 0x30, 0x7a, 0x65, 0x7d, 0xbd, 0x72, 0x99, 0x44, 0xda, 0xac, 0xec, 0x6c, 0xa3, 0xc7, + 0x9a, 0xa9, 0xb1, 0xd3, 0x1d, 0xb1, 0x1d, 0xb9, 0x8d, 0x59, 0x1e, 0x71, 0x6c, 0x76, 0xc5, 0x8b, + 0xae, 0x07, 0xd5, 0x28, 0x70, 0xbd, 0xad, 0xcc, 0x99, 0x2e, 0x65, 0x96, 0x62, 0x9e, 0xcc, 0x82, + 0x9e, 0x87, 0x01, 0x16, 0xf2, 0x4c, 0x0e, 0xc2, 0xc3, 0xea, 0x8a, 0xc5, 0xa0, 0x07, 0xfb, 0xe5, + 0xd2, 0x0d, 0xbc, 0xc2, 0xff, 0x60, 0x41, 0x8a, 0x6e, 0xc0, 0xf0, 0x76, 0x14, 0xb5, 0xae, 0x10, + 0xa7, 0x4e, 0x02, 0xb9, 0xcb, 0x9e, 0xcd, 0xda, 0x65, 0x69, 0x27, 0x70, 0xb2, 0x78, 0x63, 0x8a, + 0x61, 0x21, 0xd6, 0xf9, 0xd8, 0x55, 0x80, 0x18, 0x77, 0x44, 0x56, 0x16, 0x7b, 0x1d, 0x4a, 0xf4, + 0x73, 0xe7, 0x1a, 0xae, 0xd3, 0xd9, 0x8e, 0xfd, 0x34, 0x94, 0xa4, 0x95, 0x3a, 0x14, 0xf1, 0x11, + 0xd8, 0x89, 0x24, 0x8d, 0xd8, 0x21, 0x8e, 0xf1, 0xf6, 0x26, 0x9c, 0x60, 0xbe, 0xaa, 0x4e, 0xb4, + 0x6d, 0xcc, 0xbe, 0xee, 0xc3, 0xfc, 0x8c, 0xb8, 0xb1, 0xf1, 0x36, 0x4f, 0x6b, 0x0f, 0x7a, 0x47, + 0x24, 0xc7, 0xf8, 0xf6, 0x66, 0x7f, 0xb3, 0x0f, 0x1e, 0x5e, 0xa9, 0xe6, 0x87, 0xec, 0x79, 0x09, + 0x46, 0xb8, 0x20, 0x48, 0x07, 0xdd, 0x69, 0x88, 0x7a, 0x95, 0x6e, 0x73, 0x5d, 0xc3, 0x61, 0x83, + 0x12, 0x9d, 0x81, 0xa2, 0xfb, 0x96, 0x97, 0x7c, 0xee, 0xb6, 0xf2, 0xfa, 0x1a, 0xa6, 0x70, 0x8a, + 0xa6, 0x32, 0x25, 0xdf, 0xac, 0x15, 0x5a, 0xc9, 0x95, 0xaf, 0xc2, 0x98, 0x1b, 0xd6, 0x42, 0x77, + 0xc5, 0xa3, 0x2b, 0x50, 0x5b, 0xc3, 0x4a, 0x9b, 0x40, 0x1b, 0xad, 0xb0, 0x38, 0x41, 0xad, 0x9d, + 0x1c, 0xfd, 0x3d, 0xcb, 0xa5, 0x5d, 0x03, 0x06, 0xd0, 0x8d, 0xbd, 0xc5, 0xbe, 0x2e, 0x64, 0x9a, + 0x70, 0xb1, 0xb1, 0xf3, 0x0f, 0x0e, 0xb1, 0xc4, 0xd1, 0xab, 0x5a, 0x6d, 0xdb, 0x69, 0xcd, 0xb5, + 0xa3, 0xed, 0x45, 0x37, 0xac, 0xf9, 0xbb, 0x24, 0xd8, 0x63, 0xb7, 0xec, 0xa1, 0xf8, 0xaa, 0xa6, + 0x10, 0x0b, 0x57, 0xe6, 0x2a, 0x94, 0x12, 0xa7, 0xcb, 0xa0, 0x39, 0x18, 0x97, 0xc0, 0x2a, 0x09, + 0xd9, 0xe6, 0x3e, 0xcc, 0xd8, 0xa8, 0x07, 0x68, 0x02, 0xac, 0x98, 0x24, 0xe9, 0x4d, 0xd1, 0x15, + 0x8e, 0x42, 0x74, 0xfd, 0x00, 0x8c, 0xba, 0x9e, 0x1b, 0xb9, 0x4e, 0xe4, 0x73, 0x33, 0x0e, 0xbf, + 0x50, 0x33, 0xd5, 0xf1, 0x8a, 0x8e, 0xc0, 0x26, 0x9d, 0xfd, 0x9f, 0xfa, 0x60, 0x92, 0x0d, 0xdb, + 0xbb, 0x33, 0xec, 0x3b, 0x69, 0x86, 0xdd, 0x48, 0xcf, 0xb0, 0xa3, 0x90, 0xc9, 0xef, 0x7b, 0x9a, + 0x7d, 0x1a, 0x4a, 0xea, 0xcd, 0x9d, 0x7c, 0x74, 0x6b, 0xe5, 0x3c, 0xba, 0xed, 0x7e, 0x2e, 0x4b, + 0xcf, 0xb0, 0x62, 0xa6, 0x67, 0xd8, 0x97, 0x2d, 0x88, 0x4d, 0x06, 0xe8, 0x75, 0x28, 0xb5, 0x7c, + 0xe6, 0x68, 0x1a, 0x48, 0xef, 0xed, 0xc7, 0x3b, 0xda, 0x1c, 0x78, 0xd4, 0xb2, 0x80, 0xf7, 0x42, + 0x45, 0x16, 0xc5, 0x31, 0x17, 0x74, 0x15, 0x06, 0x5b, 0x01, 0xa9, 0x46, 0x2c, 0xa4, 0x4e, 0xef, + 0x0c, 0xf9, 0xac, 0xe1, 0x05, 0xb1, 0xe4, 0x60, 0xff, 0x67, 0x0b, 0x26, 0x92, 0xa4, 0xe8, 0x43, + 0xd0, 0x47, 0xee, 0x90, 0x9a, 0x68, 0x6f, 0xe6, 0x21, 0x1b, 0x2b, 0x1d, 0x78, 0x07, 0xd0, 0xff, + 0x98, 0x95, 0x42, 0x57, 0x60, 0x90, 0x9e, 0xb0, 0x97, 0x55, 0xf8, 0xb8, 0x47, 0xf3, 0x4e, 0x69, + 0x25, 0xaa, 0xf0, 0xc6, 0x09, 0x10, 0x96, 0xc5, 0x99, 0x3b, 0x56, 0xad, 0x55, 0xa5, 0x97, 0x97, + 0xa8, 0xd3, 0x1d, 0x7b, 0x7d, 0xa1, 0xc2, 0x89, 0x04, 0x37, 0xee, 0x8e, 0x25, 0x81, 0x38, 0x66, + 0x62, 0xff, 0xbc, 0x05, 0xc0, 0xbd, 0xcf, 0x1c, 0x6f, 0x8b, 0x1c, 0x83, 0x9e, 0x7c, 0x11, 0xfa, + 0xc2, 0x16, 0xa9, 0x75, 0xf2, 0x81, 0x8e, 0xdb, 0x53, 0x6d, 
0x91, 0x5a, 0x3c, 0xe3, 0xe8, 0x3f, + 0xcc, 0x4a, 0xdb, 0xdf, 0x07, 0x30, 0x16, 0x93, 0xad, 0x44, 0xa4, 0x89, 0x9e, 0x35, 0x02, 0x75, + 0x9c, 0x4e, 0x04, 0xea, 0x28, 0x31, 0x6a, 0x4d, 0x25, 0xfb, 0x69, 0x28, 0x36, 0x9d, 0x3b, 0x42, + 0xe7, 0xf6, 0x74, 0xe7, 0x66, 0x50, 0xfe, 0xb3, 0xab, 0xce, 0x1d, 0x7e, 0x2d, 0x7d, 0x5a, 0xae, + 0x90, 0x55, 0xe7, 0x4e, 0x57, 0x3f, 0x5d, 0x5a, 0x09, 0xab, 0xcb, 0xf5, 0x84, 0x63, 0x55, 0x4f, + 0x75, 0xb9, 0x5e, 0xb2, 0x2e, 0xd7, 0xeb, 0xa1, 0x2e, 0xd7, 0x43, 0x77, 0x61, 0x50, 0xf8, 0x3d, + 0x8a, 0x50, 0x5e, 0x17, 0x7b, 0xa8, 0x4f, 0xb8, 0x4d, 0xf2, 0x3a, 0x2f, 0xca, 0x6b, 0xb7, 0x80, + 0x76, 0xad, 0x57, 0x56, 0x88, 0xfe, 0xaa, 0x05, 0x63, 0xe2, 0x37, 0x26, 0x6f, 0xb5, 0x49, 0x18, + 0x09, 0xb1, 0xf4, 0xfd, 0xbd, 0xb7, 0x41, 0x14, 0xe4, 0x4d, 0x79, 0xbf, 0x3c, 0x67, 0x4c, 0x64, + 0xd7, 0x16, 0x25, 0x5a, 0x81, 0xfe, 0xae, 0x05, 0x27, 0x9a, 0xce, 0x1d, 0x5e, 0x23, 0x87, 0x61, + 0x27, 0x72, 0x7d, 0xe1, 0x3f, 0xf0, 0xa1, 0xde, 0x86, 0x3f, 0x55, 0x9c, 0x37, 0x52, 0xda, 0x1f, + 0x4f, 0x64, 0x91, 0x74, 0x6d, 0x6a, 0x66, 0xbb, 0x66, 0x36, 0x61, 0x48, 0xce, 0xb7, 0x07, 0xe9, + 0x64, 0xcd, 0xea, 0x11, 0x73, 0xed, 0x81, 0xd6, 0xf3, 0x69, 0x18, 0xd1, 0xe7, 0xd8, 0x03, 0xad, + 0xeb, 0x2d, 0x98, 0xca, 0x98, 0x4b, 0x0f, 0xb4, 0xca, 0xdb, 0x70, 0x3a, 0x77, 0x7e, 0x3c, 0x50, + 0x27, 0xf9, 0xaf, 0x5a, 0xfa, 0x3e, 0x78, 0x0c, 0xc6, 0x8a, 0x05, 0xd3, 0x58, 0x71, 0xb6, 0xf3, + 0xca, 0xc9, 0xb1, 0x58, 0xbc, 0xa9, 0x37, 0x9a, 0xee, 0xea, 0xe8, 0x35, 0x18, 0x68, 0x50, 0x88, + 0xf4, 0x9e, 0xb5, 0xbb, 0xaf, 0xc8, 0x58, 0x98, 0x64, 0xf0, 0x10, 0x0b, 0x0e, 0xf6, 0x2f, 0x59, + 0xd0, 0x77, 0x0c, 0x3d, 0x81, 0xcd, 0x9e, 0x78, 0x36, 0x97, 0xb5, 0x88, 0x6a, 0x3e, 0x8b, 0x9d, + 0xdb, 0x4b, 0x77, 0x22, 0xe2, 0x85, 0xec, 0x44, 0xce, 0xec, 0x98, 0x9f, 0xb6, 0x60, 0xea, 0x9a, + 0xef, 0xd4, 0xe7, 0x9d, 0x86, 0xe3, 0xd5, 0x48, 0xb0, 0xe2, 0x6d, 0x1d, 0xca, 0xf5, 0xbb, 0xd0, + 0xd5, 0xf5, 0x7b, 0x41, 0x7a, 0x4e, 0xf5, 0xe5, 0x8f, 0x1f, 0x95, 0xa4, 0x93, 0xa1, 0x8b, 0x0c, + 0x1f, 0xdf, 0x6d, 0x40, 0x7a, 0x2b, 0xc5, 0x03, 0x28, 0x0c, 0x83, 0x2e, 0x6f, 0xaf, 0x18, 0xc4, + 0x27, 0xb3, 0x25, 0xdc, 0xd4, 0xe7, 0x69, 0x4f, 0x7b, 0x38, 0x00, 0x4b, 0x46, 0xf6, 0x4b, 0x90, + 0x19, 0x6a, 0xa2, 0xbb, 0x5e, 0xc2, 0xfe, 0x18, 0x4c, 0xb2, 0x92, 0x87, 0xd4, 0x0c, 0xd8, 0x09, + 0x6d, 0x6a, 0x46, 0xd8, 0x4c, 0xfb, 0xf3, 0x16, 0x8c, 0xaf, 0x25, 0xa2, 0x09, 0x9e, 0x67, 0xf6, + 0xd7, 0x0c, 0x25, 0x7e, 0x95, 0x41, 0xb1, 0xc0, 0x1e, 0xb9, 0x92, 0xeb, 0xcf, 0x2d, 0x88, 0xa3, + 0xbf, 0x1c, 0x83, 0xf8, 0xb6, 0x60, 0x88, 0x6f, 0x99, 0x82, 0xac, 0x6a, 0x4e, 0x9e, 0xf4, 0x86, + 0xae, 0xaa, 0xb8, 0x68, 0x1d, 0x64, 0xd8, 0x98, 0x0d, 0x9f, 0x8a, 0x63, 0x66, 0xf0, 0x34, 0x19, + 0x29, 0xcd, 0xfe, 0xdd, 0x02, 0x20, 0x45, 0xdb, 0x73, 0xdc, 0xb6, 0x74, 0x89, 0xa3, 0x89, 0xdb, + 0xb6, 0x0b, 0x88, 0x79, 0x10, 0x04, 0x8e, 0x17, 0x72, 0xb6, 0xae, 0x50, 0xeb, 0x1d, 0xce, 0x3d, + 0x61, 0x46, 0xbe, 0x0d, 0xbb, 0x96, 0xe2, 0x86, 0x33, 0x6a, 0xd0, 0x3c, 0x43, 0xfa, 0x7b, 0xf5, + 0x0c, 0x19, 0xe8, 0xf2, 0xc8, 0xf1, 0x2b, 0x16, 0x8c, 0xaa, 0x6e, 0x7a, 0x87, 0xb8, 0xc2, 0xab, + 0xf6, 0xe4, 0x6c, 0xa0, 0x15, 0xad, 0xc9, 0xec, 0x60, 0xf9, 0x2e, 0xf6, 0x58, 0xd5, 0x69, 0xb8, + 0x77, 0x89, 0x8a, 0xf3, 0x59, 0x16, 0x8f, 0x4f, 0x05, 0xf4, 0x60, 0xbf, 0x3c, 0xaa, 0xfe, 0xf1, + 0x38, 0xe6, 0x71, 0x11, 0xba, 0x25, 0x8f, 0x27, 0xa6, 0x22, 0x7a, 0x11, 0xfa, 0x5b, 0xdb, 0x4e, + 0x48, 0x12, 0x4f, 0x86, 0xfa, 0x2b, 0x14, 0x78, 0xb0, 0x5f, 0x1e, 0x53, 0x05, 0x18, 0x04, 0x73, + 0xea, 0xde, 0xa3, 0xe1, 0xa5, 0x27, 0x67, 0xd7, 0x68, 0x78, 0x7f, 0x6a, 0x41, 0xdf, 
0x9a, 0x5f, + 0x3f, 0x8e, 0x2d, 0xe0, 0x55, 0x63, 0x0b, 0x78, 0x24, 0x2f, 0xc5, 0x44, 0xee, 0xea, 0x5f, 0x4e, + 0xac, 0xfe, 0xb3, 0xb9, 0x1c, 0x3a, 0x2f, 0xfc, 0x26, 0x0c, 0xb3, 0xc4, 0x15, 0xe2, 0x79, 0xd4, + 0xf3, 0xc6, 0x82, 0x2f, 0x27, 0x16, 0xfc, 0xb8, 0x46, 0xaa, 0xad, 0xf4, 0xa7, 0x60, 0x50, 0xbc, + 0xb7, 0x49, 0xbe, 0xf9, 0x15, 0xb4, 0x58, 0xe2, 0xed, 0x9f, 0x28, 0x82, 0x91, 0x28, 0x03, 0xfd, + 0x8a, 0x05, 0xb3, 0x01, 0xf7, 0xc3, 0xad, 0x2f, 0xb6, 0x03, 0xd7, 0xdb, 0xaa, 0xd6, 0xb6, 0x49, + 0xbd, 0xdd, 0x70, 0xbd, 0xad, 0x95, 0x2d, 0xcf, 0x57, 0xe0, 0xa5, 0x3b, 0xa4, 0xd6, 0x66, 0x66, + 0xb7, 0x2e, 0x59, 0x39, 0x94, 0x3f, 0xfb, 0x73, 0xf7, 0xf6, 0xcb, 0xb3, 0xf8, 0x50, 0xbc, 0xf1, + 0x21, 0xdb, 0x82, 0x7e, 0xcb, 0x82, 0x8b, 0x3c, 0x7f, 0x44, 0xef, 0xed, 0xef, 0x70, 0x5b, 0xae, + 0x48, 0x56, 0x31, 0x93, 0x75, 0x12, 0x34, 0xe7, 0x3f, 0x20, 0x3a, 0xf4, 0x62, 0xe5, 0x70, 0x75, + 0xe1, 0xc3, 0x36, 0xce, 0xfe, 0xc7, 0x45, 0x18, 0x15, 0x51, 0xd3, 0xc4, 0x19, 0xf0, 0xa2, 0x31, + 0x25, 0x1e, 0x4d, 0x4c, 0x89, 0x49, 0x83, 0xf8, 0x68, 0xb6, 0xff, 0x10, 0x26, 0xe9, 0xe6, 0x7c, + 0x85, 0x38, 0x41, 0xb4, 0x41, 0x1c, 0xee, 0xf0, 0x55, 0x3c, 0xf4, 0xee, 0xaf, 0xf4, 0x93, 0xd7, + 0x92, 0xcc, 0x70, 0x9a, 0xff, 0x77, 0xd2, 0x99, 0xe3, 0xc1, 0x44, 0x2a, 0xf0, 0xdd, 0x1b, 0x50, + 0x52, 0x8f, 0x45, 0xc4, 0xa6, 0xd3, 0x39, 0x7e, 0x64, 0x92, 0x03, 0x57, 0x7f, 0xc5, 0x0f, 0x95, + 0x62, 0x76, 0xf6, 0xdf, 0x2f, 0x18, 0x15, 0xf2, 0x41, 0x5c, 0x83, 0x21, 0x27, 0x0c, 0xdd, 0x2d, + 0x8f, 0xd4, 0x3b, 0x69, 0x28, 0x53, 0xd5, 0xb0, 0x07, 0x3b, 0x73, 0xa2, 0x24, 0x56, 0x3c, 0xd0, + 0x15, 0xee, 0x56, 0xb7, 0x4b, 0x3a, 0xa9, 0x27, 0x53, 0xdc, 0x40, 0x3a, 0xde, 0xed, 0x12, 0x2c, + 0xca, 0xa3, 0x4f, 0x70, 0xbf, 0xc7, 0xab, 0x9e, 0x7f, 0xdb, 0xbb, 0xec, 0xfb, 0x32, 0xe8, 0x46, + 0x6f, 0x0c, 0x27, 0xa5, 0xb7, 0xa3, 0x2a, 0x8e, 0x4d, 0x6e, 0xbd, 0x45, 0x92, 0xfd, 0x0c, 0xb0, + 0x78, 0xf9, 0xe6, 0xdb, 0xec, 0x10, 0x11, 0x18, 0x17, 0x21, 0xf9, 0x24, 0x4c, 0xf4, 0x5d, 0xe6, + 0x55, 0xce, 0x2c, 0x1d, 0x2b, 0xd2, 0xaf, 0x9a, 0x2c, 0x70, 0x92, 0xa7, 0xfd, 0xb3, 0x16, 0xb0, + 0x77, 0xaa, 0xc7, 0x20, 0x8f, 0x7c, 0xd8, 0x94, 0x47, 0xa6, 0xf3, 0x3a, 0x39, 0x47, 0x14, 0x79, + 0x81, 0xcf, 0xac, 0x4a, 0xe0, 0xdf, 0xd9, 0x13, 0xce, 0x2a, 0xdd, 0xef, 0x1f, 0xf6, 0xff, 0xb6, + 0xf8, 0x26, 0x16, 0xbf, 0xea, 0xff, 0x2c, 0x0c, 0xd5, 0x9c, 0x96, 0x53, 0xe3, 0x59, 0x9d, 0x72, + 0x35, 0x7a, 0x46, 0xa1, 0xd9, 0x05, 0x51, 0x82, 0x6b, 0xa8, 0x64, 0x68, 0xc7, 0x21, 0x09, 0xee, + 0xaa, 0x95, 0x52, 0x55, 0xce, 0xec, 0xc0, 0xa8, 0xc1, 0xec, 0x81, 0xaa, 0x33, 0x3e, 0xcb, 0x8f, + 0x58, 0x15, 0x8a, 0xb4, 0x09, 0x93, 0x9e, 0xf6, 0x9f, 0x1e, 0x28, 0xf2, 0x72, 0xf9, 0x78, 0xb7, + 0x43, 0x94, 0x9d, 0x3e, 0xda, 0x13, 0xd8, 0x04, 0x1b, 0x9c, 0xe6, 0x6c, 0xff, 0xa4, 0x05, 0x0f, + 0xe9, 0x84, 0xda, 0x2b, 0x9b, 0x6e, 0x46, 0x92, 0x45, 0x18, 0xf2, 0x5b, 0x24, 0x70, 0x22, 0x3f, + 0x10, 0xa7, 0xc6, 0x05, 0xd9, 0xe9, 0xd7, 0x05, 0xfc, 0x40, 0xe4, 0x28, 0x90, 0xdc, 0x25, 0x1c, + 0xab, 0x92, 0xf4, 0xf6, 0xc9, 0x3a, 0x23, 0x14, 0xef, 0xa9, 0xd8, 0x1e, 0xc0, 0x2c, 0xe9, 0x21, + 0x16, 0x18, 0xfb, 0x9b, 0x16, 0x9f, 0x58, 0x7a, 0xd3, 0xd1, 0x5b, 0x30, 0xd1, 0x74, 0xa2, 0xda, + 0xf6, 0xd2, 0x9d, 0x56, 0xc0, 0x4d, 0x4e, 0xb2, 0x9f, 0x9e, 0xee, 0xd6, 0x4f, 0xda, 0x47, 0xc6, + 0xae, 0x9c, 0xab, 0x09, 0x66, 0x38, 0xc5, 0x1e, 0x6d, 0xc0, 0x30, 0x83, 0xb1, 0xa7, 0x82, 0x61, + 0x27, 0xd1, 0x20, 0xaf, 0x36, 0xe5, 0x8c, 0xb0, 0x1a, 0xf3, 0xc1, 0x3a, 0x53, 0xfb, 0xcb, 0x45, + 0xbe, 0xda, 0x99, 0x28, 0xff, 0x14, 0x0c, 0xb6, 0xfc, 0xfa, 0xc2, 0xca, 0x22, 0x16, 0xa3, 0xa0, + 0x8e, 0x91, 
0x0a, 0x07, 0x63, 0x89, 0x47, 0x17, 0x60, 0x48, 0xfc, 0x94, 0x26, 0x42, 0xb6, 0x37, + 0x0b, 0xba, 0x10, 0x2b, 0x2c, 0x7a, 0x0e, 0xa0, 0x15, 0xf8, 0xbb, 0x6e, 0x9d, 0x85, 0x0e, 0x29, + 0x9a, 0x7e, 0x44, 0x15, 0x85, 0xc1, 0x1a, 0x15, 0x7a, 0x05, 0x46, 0xdb, 0x5e, 0xc8, 0xc5, 0x11, + 0x2d, 0x9a, 0xb2, 0xf2, 0x70, 0xb9, 0xa1, 0x23, 0xb1, 0x49, 0x8b, 0xe6, 0x60, 0x20, 0x72, 0x98, + 0x5f, 0x4c, 0x7f, 0xbe, 0xbb, 0xef, 0x3a, 0xa5, 0xd0, 0x13, 0x08, 0xd1, 0x02, 0x58, 0x14, 0x44, + 0x6f, 0xc8, 0x57, 0xbb, 0x7c, 0x63, 0x17, 0x7e, 0xf6, 0xbd, 0x1d, 0x02, 0xda, 0x9b, 0x5d, 0xe1, + 0xbf, 0x6f, 0xf0, 0x42, 0x2f, 0x03, 0x90, 0x3b, 0x11, 0x09, 0x3c, 0xa7, 0xa1, 0xbc, 0xd9, 0x94, + 0x5c, 0xb0, 0xe8, 0xaf, 0xf9, 0xd1, 0x8d, 0x90, 0x2c, 0x29, 0x0a, 0xac, 0x51, 0xdb, 0xbf, 0x55, + 0x02, 0x88, 0xe5, 0x76, 0x74, 0x37, 0xb5, 0x71, 0x3d, 0xd3, 0x59, 0xd2, 0x3f, 0xba, 0x5d, 0x0b, + 0x7d, 0xbf, 0x05, 0xc3, 0x22, 0x42, 0x0a, 0x1b, 0xa1, 0x42, 0xe7, 0x8d, 0xd3, 0x0c, 0xd4, 0x42, + 0x4b, 0xf0, 0x26, 0x3c, 0x2f, 0x67, 0xa8, 0x86, 0xe9, 0xda, 0x0a, 0xbd, 0x62, 0xf4, 0x3e, 0x79, + 0x55, 0x2c, 0x1a, 0x5d, 0xa9, 0xae, 0x8a, 0x25, 0x76, 0x46, 0xe8, 0xb7, 0xc4, 0x1b, 0xc6, 0x2d, + 0xb1, 0x2f, 0xff, 0x59, 0xa2, 0x21, 0xbe, 0x76, 0xbb, 0x20, 0xa2, 0x8a, 0x1e, 0xa2, 0xa0, 0x3f, + 0xff, 0x79, 0x9e, 0x76, 0x4f, 0xea, 0x12, 0x9e, 0xe0, 0xd3, 0x30, 0x5e, 0x37, 0x85, 0x00, 0x31, + 0x13, 0x9f, 0xcc, 0xe3, 0x9b, 0x90, 0x19, 0xe2, 0x63, 0x3f, 0x81, 0xc0, 0x49, 0xc6, 0xa8, 0xc2, + 0x23, 0x56, 0xac, 0x78, 0x9b, 0xbe, 0x78, 0xeb, 0x61, 0xe7, 0x8e, 0xe5, 0x5e, 0x18, 0x91, 0x26, + 0xa5, 0x8c, 0x4f, 0xf7, 0x35, 0x51, 0x16, 0x2b, 0x2e, 0xe8, 0x35, 0x18, 0x60, 0xef, 0xb3, 0xc2, + 0xe9, 0xa1, 0x7c, 0x8d, 0xb3, 0x19, 0xba, 0x2f, 0x5e, 0x90, 0xec, 0x6f, 0x88, 0x05, 0x07, 0x74, + 0x45, 0xbe, 0x7e, 0x0c, 0x57, 0xbc, 0x1b, 0x21, 0x61, 0xaf, 0x1f, 0x4b, 0xf3, 0x8f, 0xc7, 0x0f, + 0x1b, 0x39, 0x3c, 0x33, 0xcd, 0xa0, 0x51, 0x92, 0x4a, 0x51, 0xe2, 0xbf, 0xcc, 0x5e, 0x28, 0x02, + 0x0d, 0x65, 0x36, 0xcf, 0xcc, 0x70, 0x18, 0x77, 0xe7, 0x4d, 0x93, 0x05, 0x4e, 0xf2, 0xa4, 0x12, + 0x29, 0x5f, 0xf5, 0xe2, 0xb5, 0x48, 0xb7, 0xbd, 0x83, 0x5f, 0xc4, 0xd9, 0x69, 0xc4, 0x21, 0x58, + 0x94, 0x3f, 0x56, 0xf1, 0x60, 0xc6, 0x83, 0x89, 0xe4, 0x12, 0x7d, 0xa0, 0xe2, 0xc8, 0x1f, 0xf6, + 0xc1, 0x98, 0x39, 0xa5, 0xd0, 0x45, 0x28, 0x09, 0x26, 0x2a, 0x03, 0x88, 0x5a, 0x25, 0xab, 0x12, + 0x81, 0x63, 0x1a, 0x96, 0xf8, 0x85, 0x15, 0xd7, 0xdc, 0x83, 0xe3, 0xc4, 0x2f, 0x0a, 0x83, 0x35, + 0x2a, 0x7a, 0xb1, 0xda, 0xf0, 0xfd, 0x48, 0x1d, 0x48, 0x6a, 0xde, 0xcd, 0x33, 0x28, 0x16, 0x58, + 0x7a, 0x10, 0xed, 0x90, 0xc0, 0x23, 0x0d, 0x33, 0xf2, 0xb6, 0x3a, 0x88, 0xae, 0xea, 0x48, 0x6c, + 0xd2, 0xd2, 0xe3, 0xd4, 0x0f, 0xd9, 0x44, 0x16, 0xd7, 0xb7, 0xd8, 0xdd, 0xba, 0xca, 0x5f, 0x79, + 0x4b, 0x3c, 0xfa, 0x18, 0x3c, 0xa4, 0x02, 0x67, 0x61, 0x6e, 0xcd, 0x90, 0x35, 0x0e, 0x18, 0xda, + 0x96, 0x87, 0x16, 0xb2, 0xc9, 0x70, 0x5e, 0x79, 0xf4, 0x2a, 0x8c, 0x09, 0x11, 0x5f, 0x72, 0x1c, + 0x34, 0x3d, 0x8c, 0xae, 0x1a, 0x58, 0x9c, 0xa0, 0x96, 0xb1, 0xc3, 0x99, 0x94, 0x2d, 0x39, 0x0c, + 0xa5, 0x63, 0x87, 0xeb, 0x78, 0x9c, 0x2a, 0x81, 0xe6, 0x60, 0x9c, 0xcb, 0x60, 0xae, 0xb7, 0xc5, + 0xc7, 0x44, 0x3c, 0xe6, 0x52, 0x4b, 0xea, 0xba, 0x89, 0xc6, 0x49, 0x7a, 0xf4, 0x12, 0x8c, 0x38, + 0x41, 0x6d, 0xdb, 0x8d, 0x48, 0x2d, 0x6a, 0x07, 0xfc, 0x95, 0x97, 0xe6, 0xa2, 0x35, 0xa7, 0xe1, + 0xb0, 0x41, 0x69, 0xdf, 0x85, 0xa9, 0x8c, 0xf0, 0x0f, 0x74, 0xe2, 0x38, 0x2d, 0x57, 0x7e, 0x53, + 0xc2, 0xc3, 0x79, 0xae, 0xb2, 0x22, 0xbf, 0x46, 0xa3, 0xa2, 0xb3, 0x93, 0x85, 0x89, 0xd0, 0x92, + 0x95, 0xaa, 0xd9, 0xb9, 0x2c, 0x11, 
0x38, 0xa6, 0xb1, 0xff, 0x5b, 0x01, 0xc6, 0x33, 0x6c, 0x2b, + 0x2c, 0x61, 0x66, 0xe2, 0x92, 0x12, 0xe7, 0xc7, 0x34, 0x43, 0xd1, 0x17, 0x0e, 0x11, 0x8a, 0xbe, + 0xd8, 0x2d, 0x14, 0x7d, 0xdf, 0xdb, 0x09, 0x45, 0x6f, 0xf6, 0x58, 0x7f, 0x4f, 0x3d, 0x96, 0x11, + 0xbe, 0x7e, 0xe0, 0x90, 0xe1, 0xeb, 0x8d, 0x4e, 0x1f, 0xec, 0xa1, 0xd3, 0x7f, 0xb4, 0x00, 0x13, + 0x49, 0x57, 0xd2, 0x63, 0xd0, 0xdb, 0xbe, 0x66, 0xe8, 0x6d, 0x2f, 0xf4, 0xf2, 0xf8, 0x36, 0x57, + 0x87, 0x8b, 0x13, 0x3a, 0xdc, 0xf7, 0xf6, 0xc4, 0xad, 0xb3, 0x3e, 0xf7, 0xa7, 0x0a, 0x70, 0x32, + 0xf3, 0xf5, 0xef, 0x31, 0xf4, 0xcd, 0x75, 0xa3, 0x6f, 0x9e, 0xed, 0xf9, 0x61, 0x72, 0x6e, 0x07, + 0xdd, 0x4a, 0x74, 0xd0, 0xc5, 0xde, 0x59, 0x76, 0xee, 0xa5, 0xaf, 0x17, 0xe1, 0x6c, 0x66, 0xb9, + 0x58, 0xed, 0xb9, 0x6c, 0xa8, 0x3d, 0x9f, 0x4b, 0xa8, 0x3d, 0xed, 0xce, 0xa5, 0x8f, 0x46, 0x0f, + 0x2a, 0x1e, 0xe8, 0xb2, 0x30, 0x03, 0xf7, 0xa9, 0x03, 0x35, 0x1e, 0xe8, 0x2a, 0x46, 0xd8, 0xe4, + 0xfb, 0x9d, 0xa4, 0xfb, 0xfc, 0x97, 0x16, 0x9c, 0xce, 0x1c, 0x9b, 0x63, 0xd0, 0x75, 0xad, 0x99, + 0xba, 0xae, 0xa7, 0x7a, 0x9e, 0xad, 0x39, 0xca, 0xaf, 0x9f, 0xe9, 0xcf, 0xf9, 0x16, 0x76, 0x93, + 0xbf, 0x0e, 0xc3, 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xd5, 0xaf, 0xab, 0x40, 0xd8, 0xcf, 0xb2, 0x7b, + 0x56, 0x0c, 0x3e, 0xd8, 0x2f, 0xcf, 0x24, 0x59, 0xc4, 0x68, 0xac, 0x73, 0x40, 0x9f, 0x80, 0xa1, + 0x50, 0x9c, 0x9b, 0x62, 0xec, 0x9f, 0xef, 0xb1, 0x73, 0x9c, 0x0d, 0xd2, 0x30, 0x23, 0x2e, 0x29, + 0x4d, 0x85, 0x62, 0x69, 0x46, 0x67, 0x29, 0x1c, 0x69, 0x74, 0x96, 0xe7, 0x00, 0x76, 0xd5, 0x65, + 0x20, 0xa9, 0x7f, 0xd0, 0xae, 0x09, 0x1a, 0x15, 0xfa, 0x08, 0x4c, 0x84, 0x3c, 0x24, 0xe1, 0x42, + 0xc3, 0x09, 0xd9, 0x3b, 0x1a, 0x31, 0x0b, 0x59, 0x54, 0xa7, 0x6a, 0x02, 0x87, 0x53, 0xd4, 0x68, + 0x59, 0xd6, 0xca, 0xe2, 0x27, 0xf2, 0x89, 0x79, 0x3e, 0xae, 0x51, 0xa4, 0xeb, 0x3e, 0x91, 0xec, + 0x7e, 0xd6, 0xf1, 0x5a, 0x49, 0xf4, 0x09, 0x00, 0x3a, 0x7d, 0x84, 0x1e, 0x62, 0x30, 0x7f, 0xf3, + 0xa4, 0xbb, 0x4a, 0x3d, 0xd3, 0xb9, 0x99, 0xbd, 0xa9, 0x5d, 0x54, 0x4c, 0xb0, 0xc6, 0x10, 0x39, + 0x30, 0x1a, 0xff, 0x8b, 0xb3, 0xd9, 0x5e, 0xc8, 0xad, 0x21, 0xc9, 0x9c, 0xa9, 0xbc, 0x17, 0x75, + 0x16, 0xd8, 0xe4, 0x68, 0xff, 0xf8, 0x20, 0x3c, 0xdc, 0x61, 0x1b, 0x46, 0x73, 0xa6, 0xa9, 0xf7, + 0xe9, 0xe4, 0xfd, 0x7d, 0x26, 0xb3, 0xb0, 0x71, 0xa1, 0x4f, 0xcc, 0xf6, 0xc2, 0xdb, 0x9e, 0xed, + 0x3f, 0x6c, 0x69, 0x9a, 0x15, 0xee, 0x54, 0xfa, 0xe1, 0x43, 0x1e, 0x2f, 0x47, 0xa8, 0x6a, 0xd9, + 0xcc, 0xd0, 0x57, 0x3c, 0xd7, 0x73, 0x73, 0x7a, 0x57, 0x60, 0x7c, 0x35, 0x3b, 0x0e, 0x2f, 0x57, + 0x65, 0x5c, 0x3e, 0xec, 0xf7, 0x1f, 0x57, 0x4c, 0xde, 0x8f, 0xc9, 0xe8, 0x4b, 0xbc, 0x5e, 0xb1, + 0xd6, 0x5e, 0x8c, 0xc3, 0x29, 0xa9, 0xb3, 0xf4, 0xd1, 0xcc, 0xe6, 0xea, 0x44, 0xd8, 0x60, 0x75, + 0xbc, 0x57, 0xef, 0x6f, 0x51, 0x10, 0xe0, 0xdf, 0xb1, 0xe0, 0x4c, 0xc7, 0x88, 0x30, 0xdf, 0x86, + 0xb2, 0xa1, 0xfd, 0x39, 0x0b, 0xb2, 0x07, 0xdb, 0xf0, 0x28, 0xbb, 0x08, 0xa5, 0x5a, 0x22, 0xef, + 0x66, 0x1c, 0x1b, 0x41, 0xe5, 0xdc, 0x8c, 0x69, 0x0c, 0xc7, 0xb1, 0x42, 0x57, 0xc7, 0xb1, 0x5f, + 0xb7, 0x20, 0xb5, 0xbf, 0x1f, 0x83, 0xa0, 0xb1, 0x62, 0x0a, 0x1a, 0x8f, 0xf7, 0xd2, 0x9b, 0x39, + 0x32, 0xc6, 0x9f, 0x8c, 0xc3, 0xa9, 0x9c, 0x17, 0x79, 0xbb, 0x30, 0xb9, 0x55, 0x23, 0xe6, 0xe3, + 0xea, 0x4e, 0x41, 0x87, 0x3a, 0xbe, 0xc4, 0xe6, 0xe9, 0x4e, 0x53, 0x24, 0x38, 0x5d, 0x05, 0xfa, + 0x9c, 0x05, 0x27, 0x9c, 0xdb, 0xe1, 0x12, 0x15, 0x18, 0xdd, 0xda, 0x7c, 0xc3, 0xaf, 0xed, 0xd0, + 0xd3, 0x58, 0x2e, 0x84, 0x17, 0x32, 0x95, 0x78, 0xb7, 0xaa, 0x29, 0x7a, 0xa3, 0x7a, 0x96, 0xdc, + 0x3a, 0x8b, 0x0a, 0x67, 0xd6, 0x85, 0xb0, 0x48, 0xed, 0x41, 
0xaf, 0xa3, 0x1d, 0x9e, 0xff, 0x67, + 0x3d, 0x9d, 0xe4, 0x12, 0x90, 0xc4, 0x60, 0xc5, 0x07, 0x7d, 0x0a, 0x4a, 0x5b, 0xf2, 0xa5, 0x6f, + 0x86, 0x84, 0x15, 0x77, 0x64, 0xe7, 0xf7, 0xcf, 0xdc, 0x12, 0xaf, 0x88, 0x70, 0xcc, 0x14, 0xbd, + 0x0a, 0x45, 0x6f, 0x33, 0xec, 0x94, 0x1f, 0x3a, 0xe1, 0x72, 0xc9, 0x83, 0x6c, 0xac, 0x2d, 0x57, + 0x31, 0x2d, 0x88, 0xae, 0x40, 0x31, 0xd8, 0xa8, 0x0b, 0x0d, 0x74, 0xe6, 0x22, 0xc5, 0xf3, 0x8b, + 0x39, 0xad, 0x62, 0x9c, 0xf0, 0xfc, 0x22, 0xa6, 0x2c, 0x50, 0x05, 0xfa, 0xd9, 0x33, 0x36, 0x21, + 0xcf, 0x64, 0xde, 0xdc, 0x3a, 0x3c, 0x07, 0xe5, 0x91, 0x38, 0x18, 0x01, 0xe6, 0x8c, 0xd0, 0x3a, + 0x0c, 0xd4, 0x58, 0x2e, 0x61, 0x21, 0xc0, 0xbc, 0x2f, 0x53, 0xd7, 0xdc, 0x21, 0xc9, 0xb2, 0x50, + 0xbd, 0x32, 0x0a, 0x2c, 0x78, 0x31, 0xae, 0xa4, 0xb5, 0xbd, 0x19, 0x8a, 0x5c, 0xfb, 0xd9, 0x5c, + 0x3b, 0xe4, 0x0e, 0x17, 0x5c, 0x19, 0x05, 0x16, 0xbc, 0xd0, 0xcb, 0x50, 0xd8, 0xac, 0x89, 0x27, + 0x6a, 0x99, 0x4a, 0x67, 0x33, 0x4e, 0xca, 0xfc, 0xc0, 0xbd, 0xfd, 0x72, 0x61, 0x79, 0x01, 0x17, + 0x36, 0x6b, 0x68, 0x0d, 0x06, 0x37, 0x79, 0x64, 0x05, 0xa1, 0x57, 0x7e, 0x32, 0x3b, 0xe8, 0x43, + 0x2a, 0xf8, 0x02, 0x7f, 0xee, 0x24, 0x10, 0x58, 0x32, 0x61, 0x99, 0x26, 0x54, 0x84, 0x08, 0x11, + 0xa0, 0x6e, 0xf6, 0x70, 0x51, 0x3d, 0xb8, 0x7c, 0x19, 0xc7, 0x99, 0xc0, 0x1a, 0x47, 0x3a, 0xab, + 0x9d, 0xbb, 0xed, 0x80, 0x85, 0x1a, 0x17, 0x91, 0x8c, 0x32, 0x67, 0xf5, 0x9c, 0x24, 0xea, 0x34, + 0xab, 0x15, 0x11, 0x8e, 0x99, 0xa2, 0x1d, 0x18, 0xdd, 0x0d, 0x5b, 0xdb, 0x44, 0x2e, 0x69, 0x16, + 0xd8, 0x28, 0x47, 0x3e, 0xba, 0x29, 0x08, 0xdd, 0x20, 0x6a, 0x3b, 0x8d, 0xd4, 0x2e, 0xc4, 0x64, + 0xd9, 0x9b, 0x3a, 0x33, 0x6c, 0xf2, 0xa6, 0xdd, 0xff, 0x56, 0xdb, 0xdf, 0xd8, 0x8b, 0x88, 0x88, + 0x2b, 0x97, 0xd9, 0xfd, 0xaf, 0x73, 0x92, 0x74, 0xf7, 0x0b, 0x04, 0x96, 0x4c, 0xd0, 0x4d, 0xd1, + 0x3d, 0x6c, 0xf7, 0x9c, 0xc8, 0x8f, 0x30, 0x3b, 0x27, 0x89, 0x72, 0x3a, 0x85, 0xed, 0x96, 0x31, + 0x2b, 0xb6, 0x4b, 0xb6, 0xb6, 0xfd, 0xc8, 0xf7, 0x12, 0x3b, 0xf4, 0x64, 0xfe, 0x2e, 0x59, 0xc9, + 0xa0, 0x4f, 0xef, 0x92, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0x54, 0x87, 0xb1, 0x96, 0x1f, 0x44, 0xb7, + 0xfd, 0x40, 0xce, 0x2f, 0xd4, 0x41, 0x2f, 0x66, 0x50, 0x8a, 0x1a, 0x59, 0xc8, 0x46, 0x13, 0x83, + 0x13, 0x3c, 0xd1, 0x47, 0x61, 0x30, 0xac, 0x39, 0x0d, 0xb2, 0x72, 0x7d, 0x7a, 0x2a, 0xff, 0xf8, + 0xa9, 0x72, 0x92, 0x9c, 0xd9, 0xc5, 0x03, 0x63, 0x70, 0x12, 0x2c, 0xd9, 0xa1, 0x65, 0xe8, 0x67, + 0xe9, 0x16, 0x59, 0x10, 0xc4, 0x9c, 0x40, 0xb9, 0x29, 0x07, 0x78, 0xbe, 0x37, 0x31, 0x30, 0xe6, + 0xc5, 0xe9, 0x1a, 0x10, 0xd7, 0x43, 0x3f, 0x9c, 0x3e, 0x99, 0xbf, 0x06, 0xc4, 0xad, 0xf2, 0x7a, + 0xb5, 0xd3, 0x1a, 0x50, 0x44, 0x38, 0x66, 0x4a, 0x77, 0x66, 0xba, 0x9b, 0x9e, 0xea, 0xe0, 0xb9, + 0x95, 0xbb, 0x97, 0xb2, 0x9d, 0x99, 0xee, 0xa4, 0x94, 0x85, 0xfd, 0x07, 0x83, 0x69, 0x99, 0x85, + 0x29, 0x14, 0xfe, 0x82, 0x95, 0xb2, 0x35, 0xbf, 0xbf, 0x57, 0xfd, 0xe6, 0x11, 0x5e, 0x85, 0x3e, + 0x67, 0xc1, 0xa9, 0x56, 0xe6, 0x87, 0x08, 0x01, 0xa0, 0x37, 0x35, 0x29, 0xff, 0x74, 0x15, 0x30, + 0x33, 0x1b, 0x8f, 0x73, 0x6a, 0x4a, 0x5e, 0x37, 0x8b, 0x6f, 0xfb, 0xba, 0xb9, 0x0a, 0x43, 0x35, + 0x7e, 0x15, 0xe9, 0x98, 0x5b, 0x3f, 0x79, 0xf7, 0x66, 0xa2, 0x84, 0xb8, 0xc3, 0x6c, 0x62, 0xc5, + 0x02, 0xfd, 0x88, 0x05, 0x67, 0x92, 0x4d, 0xc7, 0x84, 0xa1, 0x45, 0x94, 0x4d, 0xae, 0xcb, 0x58, + 0x16, 0xdf, 0x9f, 0x92, 0xff, 0x0d, 0xe2, 0x83, 0x6e, 0x04, 0xb8, 0x73, 0x65, 0x68, 0x31, 0x43, + 0x99, 0x32, 0x60, 0x1a, 0x90, 0x7a, 0x50, 0xa8, 0xbc, 0x00, 0x23, 0x4d, 0xbf, 0xed, 0x45, 0xc2, + 0xd1, 0x4b, 0x38, 0x9d, 0x30, 0x67, 0x8b, 0x55, 0x0d, 0x8e, 0x0d, 0xaa, 0x84, 0x1a, 
0x66, 0xe8, + 0xbe, 0xd5, 0x30, 0x6f, 0xc2, 0x88, 0xa7, 0x79, 0x26, 0x0b, 0x79, 0xe0, 0x7c, 0x7e, 0x84, 0x5c, + 0xdd, 0x8f, 0x99, 0xb7, 0x52, 0x87, 0x60, 0x83, 0xdb, 0xf1, 0x7a, 0x80, 0x7d, 0xc9, 0xca, 0x10, + 0xea, 0xb9, 0x2a, 0xe6, 0x43, 0xa6, 0x2a, 0xe6, 0x7c, 0x52, 0x15, 0x93, 0x32, 0x1e, 0x18, 0x5a, + 0x98, 0xde, 0xb3, 0x3b, 0xf5, 0x1a, 0x65, 0xd3, 0x6e, 0xc0, 0xb9, 0x6e, 0xc7, 0x12, 0xf3, 0xf8, + 0xab, 0x2b, 0x53, 0x71, 0xec, 0xf1, 0x57, 0x5f, 0x59, 0xc4, 0x0c, 0xd3, 0x6b, 0xfc, 0x26, 0xfb, + 0xbf, 0x58, 0x50, 0xac, 0xf8, 0xf5, 0x63, 0xb8, 0xf0, 0x7e, 0xd8, 0xb8, 0xf0, 0x3e, 0x9c, 0x7d, + 0x20, 0xd6, 0x73, 0x4d, 0x1f, 0x4b, 0x09, 0xd3, 0xc7, 0x99, 0x3c, 0x06, 0x9d, 0x0d, 0x1d, 0x3f, + 0x5d, 0x84, 0xe1, 0x8a, 0x5f, 0x57, 0xee, 0xf6, 0xff, 0xf4, 0x7e, 0xdc, 0xed, 0x73, 0x73, 0x65, + 0x68, 0x9c, 0x99, 0xa3, 0xa0, 0x7c, 0x69, 0xfc, 0x6d, 0xe6, 0x75, 0x7f, 0x8b, 0xb8, 0x5b, 0xdb, + 0x11, 0xa9, 0x27, 0x3f, 0xe7, 0xf8, 0xbc, 0xee, 0xff, 0xa0, 0x00, 0xe3, 0x89, 0xda, 0x51, 0x03, + 0x46, 0x1b, 0xba, 0x62, 0x5d, 0xcc, 0xd3, 0xfb, 0xd2, 0xc9, 0x0b, 0xaf, 0x65, 0x0d, 0x84, 0x4d, + 0xe6, 0x68, 0x16, 0x40, 0x59, 0x9a, 0xa5, 0x7a, 0x95, 0x49, 0xfd, 0xca, 0x14, 0x1d, 0x62, 0x8d, + 0x02, 0xbd, 0x08, 0xc3, 0x91, 0xdf, 0xf2, 0x1b, 0xfe, 0xd6, 0xde, 0x55, 0x22, 0x43, 0x7b, 0x29, + 0x5f, 0xc4, 0xf5, 0x18, 0x85, 0x75, 0x3a, 0x74, 0x07, 0x26, 0x15, 0x93, 0xea, 0x11, 0x18, 0x1b, + 0x98, 0x56, 0x61, 0x2d, 0xc9, 0x11, 0xa7, 0x2b, 0xb1, 0x7f, 0xae, 0xc8, 0xbb, 0xd8, 0x8b, 0xdc, + 0x77, 0x57, 0xc3, 0x3b, 0x7b, 0x35, 0x7c, 0xdd, 0x82, 0x09, 0x5a, 0x3b, 0x73, 0xb4, 0x92, 0xc7, + 0xbc, 0x8a, 0xc9, 0x6d, 0x75, 0x88, 0xc9, 0x7d, 0x9e, 0xee, 0x9a, 0x75, 0xbf, 0x1d, 0x09, 0xdd, + 0x9d, 0xb6, 0x2d, 0x52, 0x28, 0x16, 0x58, 0x41, 0x47, 0x82, 0x40, 0x3c, 0x0e, 0xd5, 0xe9, 0x48, + 0x10, 0x60, 0x81, 0x95, 0x21, 0xbb, 0xfb, 0xb2, 0x43, 0x76, 0xf3, 0xc8, 0xab, 0xc2, 0x25, 0x47, + 0x08, 0x5c, 0x5a, 0xe4, 0x55, 0xe9, 0xab, 0x13, 0xd3, 0xd8, 0x5f, 0x2d, 0xc2, 0x48, 0xc5, 0xaf, + 0xc7, 0x56, 0xe6, 0x17, 0x0c, 0x2b, 0xf3, 0xb9, 0x84, 0x95, 0x79, 0x42, 0xa7, 0x7d, 0xd7, 0xa6, + 0xfc, 0xad, 0xb2, 0x29, 0xff, 0x9a, 0xc5, 0x46, 0x6d, 0x71, 0xad, 0xca, 0xfd, 0xf6, 0xd0, 0x25, + 0x18, 0x66, 0x1b, 0x0c, 0x7b, 0x8d, 0x2c, 0x4d, 0xaf, 0x2c, 0xdf, 0xd5, 0x5a, 0x0c, 0xc6, 0x3a, + 0x0d, 0xba, 0x00, 0x43, 0x21, 0x71, 0x82, 0xda, 0xb6, 0xda, 0x5d, 0x85, 0x9d, 0x94, 0xc3, 0xb0, + 0xc2, 0xa2, 0xd7, 0xe3, 0xa0, 0x9f, 0xc5, 0xfc, 0xd7, 0x8d, 0x7a, 0x7b, 0xf8, 0x12, 0xc9, 0x8f, + 0xf4, 0x69, 0xdf, 0x02, 0x94, 0xa6, 0xef, 0x21, 0x2c, 0x5d, 0xd9, 0x0c, 0x4b, 0x57, 0x4a, 0x85, + 0xa4, 0xfb, 0x33, 0x0b, 0xc6, 0x2a, 0x7e, 0x9d, 0x2e, 0xdd, 0xef, 0xa4, 0x75, 0xaa, 0x47, 0x3c, + 0x1e, 0xe8, 0x10, 0xf1, 0xf8, 0x31, 0xe8, 0xaf, 0xf8, 0xf5, 0x95, 0x4a, 0xa7, 0xd0, 0x02, 0xf6, + 0xdf, 0xb4, 0x60, 0xb0, 0xe2, 0xd7, 0x8f, 0xc1, 0x2c, 0xf0, 0x21, 0xd3, 0x2c, 0xf0, 0x50, 0xce, + 0xbc, 0xc9, 0xb1, 0x04, 0xfc, 0x8d, 0x3e, 0x18, 0xa5, 0xed, 0xf4, 0xb7, 0xe4, 0x50, 0x1a, 0xdd, + 0x66, 0xf5, 0xd0, 0x6d, 0x54, 0x0a, 0xf7, 0x1b, 0x0d, 0xff, 0x76, 0x72, 0x58, 0x97, 0x19, 0x14, + 0x0b, 0x2c, 0x7a, 0x06, 0x86, 0x5a, 0x01, 0xd9, 0x75, 0x7d, 0x21, 0xde, 0x6a, 0x46, 0x96, 0x8a, + 0x80, 0x63, 0x45, 0x41, 0xaf, 0x85, 0xa1, 0xeb, 0xd1, 0xa3, 0xbc, 0xe6, 0x7b, 0x75, 0xae, 0x39, + 0x2f, 0x8a, 0xb4, 0x1c, 0x1a, 0x1c, 0x1b, 0x54, 0xe8, 0x16, 0x94, 0xd8, 0x7f, 0xb6, 0xed, 0x1c, + 0x3e, 0x7b, 0xaf, 0xc8, 0x2a, 0x28, 0x18, 0xe0, 0x98, 0x17, 0x7a, 0x0e, 0x20, 0x92, 0xa1, 0xed, + 0x43, 0x11, 0x68, 0x4d, 0x5d, 0x05, 0x54, 0xd0, 0xfb, 0x10, 0x6b, 0x54, 0xe8, 0x69, 0x28, 0x45, + 0x8e, 0xdb, 
0xb8, 0xe6, 0x7a, 0x24, 0x64, 0x1a, 0xf1, 0xa2, 0x4c, 0xee, 0x27, 0x80, 0x38, 0xc6, + 0x53, 0x51, 0x8c, 0x05, 0xe1, 0xe0, 0xb9, 0xcb, 0x87, 0x18, 0x35, 0x13, 0xc5, 0xae, 0x29, 0x28, + 0xd6, 0x28, 0xd0, 0x36, 0x3c, 0xe2, 0x7a, 0x2c, 0x85, 0x05, 0xa9, 0xee, 0xb8, 0xad, 0xf5, 0x6b, + 0xd5, 0x9b, 0x24, 0x70, 0x37, 0xf7, 0xe6, 0x9d, 0xda, 0x0e, 0xf1, 0x64, 0x5e, 0x56, 0x99, 0xae, + 0xfb, 0x91, 0x95, 0x0e, 0xb4, 0xb8, 0x23, 0x27, 0xfb, 0x79, 0x36, 0xdf, 0xaf, 0x57, 0xd1, 0x7b, + 0x8d, 0xad, 0xe3, 0x94, 0xbe, 0x75, 0x1c, 0xec, 0x97, 0x07, 0xae, 0x57, 0xb5, 0x18, 0x12, 0x2f, + 0xc1, 0xc9, 0x8a, 0x5f, 0xaf, 0xf8, 0x41, 0xb4, 0xec, 0x07, 0xb7, 0x9d, 0xa0, 0x2e, 0xa7, 0x57, + 0x59, 0x46, 0xd1, 0xa0, 0xfb, 0x67, 0x3f, 0xdf, 0x5d, 0x8c, 0x08, 0x19, 0xcf, 0x33, 0x89, 0xed, + 0x90, 0x6f, 0xbf, 0x6a, 0x4c, 0x76, 0x50, 0x49, 0x60, 0x2e, 0x3b, 0x11, 0x41, 0xd7, 0x59, 0xe6, + 0xf5, 0xf8, 0x18, 0x15, 0xc5, 0x9f, 0xd2, 0x32, 0xaf, 0xc7, 0xc8, 0xcc, 0x73, 0xd7, 0x2c, 0x6f, + 0x7f, 0x56, 0x54, 0xc2, 0xef, 0xe0, 0xdc, 0xbf, 0xae, 0x97, 0xd4, 0xc5, 0x32, 0x4b, 0x44, 0x21, + 0x3f, 0xbd, 0x00, 0xb7, 0x7a, 0x76, 0xcc, 0x12, 0x61, 0xbf, 0x08, 0x93, 0xf4, 0xea, 0xa7, 0xe4, + 0x28, 0xf6, 0x91, 0xdd, 0xa3, 0x79, 0xfc, 0xd7, 0x7e, 0x76, 0x0e, 0x24, 0xd2, 0x9f, 0xa0, 0x4f, + 0xc2, 0x58, 0x48, 0xae, 0xb9, 0x5e, 0xfb, 0x8e, 0x54, 0xbc, 0x74, 0x78, 0x73, 0x58, 0x5d, 0xd2, + 0x29, 0xb9, 0xfa, 0xd6, 0x84, 0xe1, 0x04, 0x37, 0xd4, 0x84, 0xb1, 0xdb, 0xae, 0x57, 0xf7, 0x6f, + 0x87, 0x92, 0xff, 0x50, 0xbe, 0x16, 0xf7, 0x16, 0xa7, 0x4c, 0xb4, 0xd1, 0xa8, 0xee, 0x96, 0xc1, + 0x0c, 0x27, 0x98, 0xd3, 0xb5, 0x16, 0xb4, 0xbd, 0xb9, 0xf0, 0x46, 0x48, 0x02, 0x91, 0xf9, 0x9f, + 0xa7, 0xe5, 0x95, 0x40, 0x1c, 0xe3, 0xe9, 0x5a, 0x63, 0x7f, 0x2e, 0x07, 0x7e, 0x9b, 0xe7, 0xda, + 0x10, 0x6b, 0x0d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x5e, 0xc4, 0xfe, 0xad, 0xf9, 0x1e, 0xf6, 0xfd, + 0x48, 0xee, 0x5e, 0xcc, 0x13, 0x41, 0x83, 0x63, 0x83, 0x0a, 0x2d, 0x03, 0x0a, 0xdb, 0xad, 0x56, + 0x83, 0x39, 0x33, 0x39, 0x0d, 0xc6, 0x8a, 0x7b, 0x79, 0x14, 0x79, 0xac, 0xe0, 0x6a, 0x0a, 0x8b, + 0x33, 0x4a, 0xd0, 0x63, 0x69, 0x53, 0x34, 0xb5, 0x9f, 0x35, 0x95, 0x5b, 0x7c, 0xaa, 0xbc, 0x9d, + 0x12, 0x87, 0x96, 0x60, 0x30, 0xdc, 0x0b, 0x6b, 0x91, 0x08, 0xed, 0x98, 0x93, 0x46, 0xab, 0xca, + 0x48, 0xb4, 0x2c, 0x8e, 0xbc, 0x08, 0x96, 0x65, 0x51, 0x0d, 0xa6, 0x04, 0xc7, 0x85, 0x6d, 0xc7, + 0x53, 0xf9, 0x82, 0xb8, 0x4f, 0xf7, 0xa5, 0x7b, 0xfb, 0xe5, 0x29, 0x51, 0xb3, 0x8e, 0x3e, 0xd8, + 0x2f, 0x9f, 0xaa, 0xf8, 0xf5, 0x0c, 0x0c, 0xce, 0xe2, 0xc6, 0x27, 0x5f, 0xad, 0xe6, 0x37, 0x5b, + 0x95, 0xc0, 0xdf, 0x74, 0x1b, 0xa4, 0x93, 0xd5, 0xac, 0x6a, 0x50, 0x8a, 0xc9, 0x67, 0xc0, 0x70, + 0x82, 0x9b, 0xfd, 0x59, 0x26, 0xba, 0xb1, 0x64, 0xf1, 0x51, 0x3b, 0x20, 0xa8, 0x09, 0xa3, 0x2d, + 0xb6, 0xb8, 0x45, 0x06, 0x0c, 0x31, 0xd7, 0x5f, 0xe8, 0x51, 0xfb, 0x73, 0x9b, 0xe5, 0xf5, 0x32, + 0x3c, 0xa3, 0x2a, 0x3a, 0x3b, 0x6c, 0x72, 0xb7, 0xff, 0xf5, 0x69, 0x76, 0xf8, 0x57, 0xb9, 0x4a, + 0x67, 0x50, 0x3c, 0x21, 0x11, 0xb7, 0xc8, 0x99, 0x7c, 0xdd, 0x62, 0x3c, 0x2c, 0xe2, 0x19, 0x0a, + 0x96, 0x65, 0xd1, 0x27, 0x60, 0x8c, 0x5e, 0xca, 0xd4, 0x01, 0x1c, 0x4e, 0x9f, 0xc8, 0x0f, 0xf5, + 0xa1, 0xa8, 0xf4, 0xec, 0x38, 0x7a, 0x61, 0x9c, 0x60, 0x86, 0x5e, 0x67, 0x9e, 0x48, 0x92, 0x75, + 0xa1, 0x17, 0xd6, 0xba, 0xd3, 0x91, 0x64, 0xab, 0x31, 0x41, 0x6d, 0x98, 0x4a, 0x27, 0xec, 0x0b, + 0xa7, 0xed, 0x7c, 0xe9, 0x36, 0x9d, 0x73, 0x2f, 0x4e, 0x63, 0x92, 0xc6, 0x85, 0x38, 0x8b, 0x3f, + 0xba, 0x06, 0xa3, 0x22, 0x63, 0xba, 0x98, 0xb9, 0x45, 0x43, 0xe5, 0x39, 0x8a, 0x75, 0xe4, 0x41, + 0x12, 0x80, 0xcd, 0xc2, 0x68, 0x0b, 
0xce, 0x68, 0x49, 0xae, 0x2e, 0x07, 0x0e, 0xf3, 0x5b, 0x70, + 0xd9, 0x76, 0xaa, 0x89, 0x25, 0x8f, 0xde, 0xdb, 0x2f, 0x9f, 0x59, 0xef, 0x44, 0x88, 0x3b, 0xf3, + 0x41, 0xd7, 0xe1, 0x24, 0x7f, 0xa8, 0xbe, 0x48, 0x9c, 0x7a, 0xc3, 0xf5, 0x94, 0xdc, 0xc3, 0x97, + 0xfc, 0xe9, 0x7b, 0xfb, 0xe5, 0x93, 0x73, 0x59, 0x04, 0x38, 0xbb, 0x1c, 0xfa, 0x10, 0x94, 0xea, + 0x5e, 0x28, 0xfa, 0x60, 0xc0, 0xc8, 0x23, 0x56, 0x5a, 0x5c, 0xab, 0xaa, 0xef, 0x8f, 0xff, 0xe0, + 0xb8, 0x00, 0xda, 0xe2, 0x6a, 0x71, 0xa5, 0xac, 0x19, 0x4c, 0x05, 0xea, 0x4a, 0xea, 0x33, 0x8d, + 0xa7, 0xaa, 0xdc, 0x1e, 0xa4, 0x5e, 0x70, 0x18, 0xaf, 0x58, 0x0d, 0xc6, 0xe8, 0x35, 0x40, 0x22, + 0x5e, 0xfd, 0x5c, 0x8d, 0xa5, 0x57, 0x61, 0x56, 0x84, 0x21, 0xf3, 0xf1, 0x64, 0x35, 0x45, 0x81, + 0x33, 0x4a, 0xa1, 0x2b, 0x74, 0x57, 0xd1, 0xa1, 0x62, 0xd7, 0x52, 0xa9, 0x25, 0x17, 0x49, 0x2b, + 0x20, 0xcc, 0x0f, 0xcb, 0xe4, 0x88, 0x13, 0xe5, 0x50, 0x1d, 0x1e, 0x71, 0xda, 0x91, 0xcf, 0x2c, + 0x0e, 0x26, 0xe9, 0xba, 0xbf, 0x43, 0x3c, 0x66, 0xec, 0x1b, 0x9a, 0x3f, 0x47, 0x05, 0xab, 0xb9, + 0x0e, 0x74, 0xb8, 0x23, 0x17, 0x2a, 0x10, 0xab, 0x5c, 0xd2, 0x60, 0x86, 0x1f, 0xcb, 0xc8, 0x27, + 0xfd, 0x22, 0x0c, 0x6f, 0xfb, 0x61, 0xb4, 0x46, 0xa2, 0xdb, 0x7e, 0xb0, 0x23, 0xc2, 0xe8, 0xc6, + 0x41, 0xc9, 0x63, 0x14, 0xd6, 0xe9, 0xe8, 0x8d, 0x97, 0xb9, 0xa2, 0xac, 0x2c, 0x32, 0x2f, 0x80, + 0xa1, 0x78, 0x8f, 0xb9, 0xc2, 0xc1, 0x58, 0xe2, 0x25, 0xe9, 0x4a, 0x65, 0x81, 0x59, 0xf4, 0x13, + 0xa4, 0x2b, 0x95, 0x05, 0x2c, 0xf1, 0x74, 0xba, 0x86, 0xdb, 0x4e, 0x40, 0x2a, 0x81, 0x5f, 0x23, + 0xa1, 0x16, 0x0a, 0xff, 0x61, 0x1e, 0x24, 0x98, 0x4e, 0xd7, 0x6a, 0x16, 0x01, 0xce, 0x2e, 0x87, + 0x48, 0x3a, 0xc1, 0xdb, 0x58, 0xbe, 0x29, 0x26, 0x2d, 0xcf, 0xf4, 0x98, 0xe3, 0xcd, 0x83, 0x09, + 0x95, 0x5a, 0x8e, 0x87, 0x05, 0x0e, 0xa7, 0xc7, 0xd9, 0xdc, 0xee, 0x3d, 0xa6, 0xb0, 0x32, 0x6e, + 0xad, 0x24, 0x38, 0xe1, 0x14, 0x6f, 0x23, 0xc2, 0xdc, 0x44, 0xd7, 0x08, 0x73, 0x17, 0xa1, 0x14, + 0xb6, 0x37, 0xea, 0x7e, 0xd3, 0x71, 0x3d, 0x66, 0xd1, 0xd7, 0xae, 0x5e, 0x55, 0x89, 0xc0, 0x31, + 0x0d, 0x5a, 0x86, 0x21, 0x47, 0x5a, 0xae, 0x50, 0x7e, 0x4c, 0x21, 0x65, 0xaf, 0xe2, 0x61, 0x36, + 0xa4, 0xad, 0x4a, 0x95, 0x45, 0xaf, 0xc0, 0xa8, 0x78, 0x68, 0x2d, 0x52, 0xa7, 0x4e, 0x99, 0xaf, + 0xe1, 0xaa, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x06, 0x0c, 0x47, 0x7e, 0x83, 0x3d, 0xe9, 0xa2, 0x62, + 0xde, 0xa9, 0xfc, 0xe8, 0x78, 0xeb, 0x8a, 0x4c, 0x57, 0x1a, 0xab, 0xa2, 0x58, 0xe7, 0x83, 0xd6, + 0xf9, 0x7c, 0x67, 0x81, 0xef, 0x49, 0x28, 0x72, 0x6f, 0x9e, 0xc9, 0x73, 0xc7, 0x62, 0x64, 0xe6, + 0x72, 0x10, 0x25, 0xb1, 0xce, 0x06, 0x5d, 0x86, 0xc9, 0x56, 0xe0, 0xfa, 0x6c, 0x4e, 0x28, 0xa3, + 0xe5, 0xb4, 0x99, 0xe6, 0xaa, 0x92, 0x24, 0xc0, 0xe9, 0x32, 0xec, 0x9d, 0xbc, 0x00, 0x4e, 0x9f, + 0xe6, 0xa9, 0x3a, 0xf8, 0x4d, 0x96, 0xc3, 0xb0, 0xc2, 0xa2, 0x55, 0xb6, 0x13, 0x73, 0x25, 0xcc, + 0xf4, 0x4c, 0x7e, 0x18, 0x23, 0x5d, 0x59, 0xc3, 0x85, 0x57, 0xf5, 0x17, 0xc7, 0x1c, 0x50, 0x5d, + 0xcb, 0x90, 0x49, 0xaf, 0x00, 0xe1, 0xf4, 0x23, 0x1d, 0xfc, 0x01, 0x13, 0x97, 0xa2, 0x58, 0x20, + 0x30, 0xc0, 0x21, 0x4e, 0xf0, 0x44, 0x1f, 0x81, 0x09, 0x11, 0x7c, 0x31, 0xee, 0xa6, 0x33, 0xb1, + 0xa3, 0x3c, 0x4e, 0xe0, 0x70, 0x8a, 0x9a, 0xa7, 0xca, 0x70, 0x36, 0x1a, 0x44, 0x6c, 0x7d, 0xd7, + 0x5c, 0x6f, 0x27, 0x9c, 0x3e, 0xcb, 0xf6, 0x07, 0x91, 0x2a, 0x23, 0x89, 0xc5, 0x19, 0x25, 0xd0, + 0x3a, 0x4c, 0xb4, 0x02, 0x42, 0x9a, 0x4c, 0xd0, 0x17, 0xe7, 0x59, 0x99, 0x87, 0x89, 0xa0, 0x2d, + 0xa9, 0x24, 0x70, 0x07, 0x19, 0x30, 0x9c, 0xe2, 0x80, 0x6e, 0xc3, 0x90, 0xbf, 0x4b, 0x82, 0x6d, + 0xe2, 0xd4, 0xa7, 0xcf, 0x75, 0x78, 0xb8, 0x21, 0x0e, 0xb7, 
0xeb, 0x82, 0x36, 0xe1, 0xe8, 0x20, + 0xc1, 0xdd, 0x1d, 0x1d, 0x64, 0x65, 0xe8, 0x2f, 0x5a, 0x70, 0x5a, 0xda, 0x46, 0xaa, 0x2d, 0xda, + 0xeb, 0x0b, 0xbe, 0x17, 0x46, 0x01, 0x0f, 0x6c, 0xf0, 0x68, 0xfe, 0x63, 0xff, 0xf5, 0x9c, 0x42, + 0x4a, 0x0f, 0x7c, 0x3a, 0x8f, 0x22, 0xc4, 0xf9, 0x35, 0xa2, 0x05, 0x98, 0x0c, 0x49, 0x24, 0x37, + 0xa3, 0xb9, 0x70, 0xf9, 0xf5, 0xc5, 0xb5, 0xe9, 0xc7, 0x78, 0x54, 0x06, 0xba, 0x18, 0xaa, 0x49, + 0x24, 0x4e, 0xd3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xe9, 0xc7, 0x3b, 0x24, 0x55, 0xf5, 0xeb, 0xd7, + 0xab, 0xdc, 0xe1, 0xed, 0x7a, 0x15, 0x17, 0xfc, 0x50, 0xa6, 0xab, 0xa0, 0xf7, 0xb1, 0x70, 0xfa, + 0x09, 0xae, 0x35, 0x94, 0xe9, 0x2a, 0x18, 0x10, 0xc7, 0x78, 0xb4, 0x0d, 0xe3, 0xa1, 0x71, 0xef, + 0x0d, 0xa7, 0xcf, 0xb3, 0x9e, 0x7a, 0x22, 0x6f, 0xd0, 0x0c, 0x6a, 0x2d, 0xda, 0xbc, 0xc9, 0x05, + 0x27, 0xd9, 0xf2, 0xd5, 0xa5, 0x5d, 0xf0, 0xc3, 0xe9, 0x27, 0xbb, 0xac, 0x2e, 0x8d, 0x58, 0x5f, + 0x5d, 0x3a, 0x0f, 0x9c, 0xe0, 0x39, 0xf3, 0x5d, 0x30, 0x99, 0x12, 0x97, 0x0e, 0x93, 0x89, 0x69, + 0x66, 0x07, 0x46, 0x8d, 0x29, 0xf9, 0x40, 0x1d, 0x0b, 0xbe, 0x67, 0x08, 0x4a, 0xca, 0xe8, 0x8c, + 0x2e, 0x9a, 0xbe, 0x04, 0xa7, 0x93, 0xbe, 0x04, 0x43, 0x15, 0xbf, 0x6e, 0xb8, 0x0f, 0xac, 0x67, + 0xc4, 0xee, 0xcb, 0xdb, 0x00, 0x7b, 0x7f, 0xd3, 0xa0, 0x69, 0xf2, 0x8b, 0x3d, 0x3b, 0x25, 0xf4, + 0x75, 0x34, 0x0e, 0x5c, 0x86, 0x49, 0xcf, 0x67, 0x32, 0x3a, 0xa9, 0x4b, 0x01, 0x8c, 0xc9, 0x59, + 0x25, 0x3d, 0x18, 0x4e, 0x82, 0x00, 0xa7, 0xcb, 0xd0, 0x0a, 0xb9, 0xa0, 0x94, 0xb4, 0x46, 0x70, + 0x39, 0x0a, 0x0b, 0x2c, 0x7a, 0x0c, 0xfa, 0x5b, 0x7e, 0x7d, 0xa5, 0x22, 0xe4, 0x73, 0x2d, 0x62, + 0x6c, 0x7d, 0xa5, 0x82, 0x39, 0x0e, 0xcd, 0xc1, 0x00, 0xfb, 0x11, 0x4e, 0x8f, 0xe4, 0x47, 0x3d, + 0x61, 0x25, 0xb4, 0x3c, 0x57, 0xac, 0x00, 0x16, 0x05, 0x99, 0x56, 0x94, 0x5e, 0x6a, 0x98, 0x56, + 0x74, 0xf0, 0x3e, 0xb5, 0xa2, 0x92, 0x01, 0x8e, 0x79, 0xa1, 0x3b, 0x70, 0xd2, 0xb8, 0x48, 0xf2, + 0x29, 0x42, 0x42, 0x11, 0x79, 0xe1, 0xb1, 0x8e, 0x37, 0x48, 0xe1, 0xc4, 0x70, 0x46, 0x34, 0xfa, + 0xe4, 0x4a, 0x16, 0x27, 0x9c, 0x5d, 0x01, 0x6a, 0xc0, 0x64, 0x2d, 0x55, 0xeb, 0x50, 0xef, 0xb5, + 0xaa, 0x01, 0x4d, 0xd7, 0x98, 0x66, 0x8c, 0x5e, 0x81, 0xa1, 0xb7, 0xfc, 0x90, 0x9d, 0x6d, 0xe2, + 0x4e, 0x21, 0x9f, 0xed, 0x0f, 0xbd, 0x7e, 0xbd, 0xca, 0xe0, 0x07, 0xfb, 0xe5, 0xe1, 0x8a, 0x5f, + 0x97, 0x7f, 0xb1, 0x2a, 0x80, 0x7e, 0xc0, 0x82, 0x99, 0xf4, 0x4d, 0x55, 0x35, 0x7a, 0xb4, 0xf7, + 0x46, 0xdb, 0xa2, 0xd2, 0x99, 0xa5, 0x5c, 0x76, 0xb8, 0x43, 0x55, 0xe8, 0x83, 0x74, 0x21, 0x84, + 0xee, 0x5d, 0x22, 0x92, 0x84, 0x3e, 0x1a, 0x2f, 0x04, 0x0a, 0x3d, 0xd8, 0x2f, 0x8f, 0xf3, 0x2d, + 0x2d, 0x7e, 0x37, 0x23, 0x0a, 0xd8, 0xbf, 0x6c, 0x31, 0xb5, 0xac, 0x80, 0x92, 0xb0, 0xdd, 0x38, + 0x8e, 0xcc, 0xc0, 0x4b, 0x86, 0xc9, 0xf3, 0xbe, 0xfd, 0x61, 0xfe, 0x89, 0xc5, 0xfc, 0x61, 0x8e, + 0xf1, 0xe1, 0xcb, 0xeb, 0x30, 0x14, 0xc9, 0x8c, 0xcd, 0x1d, 0x92, 0x19, 0x6b, 0x8d, 0x62, 0x3e, + 0x41, 0xea, 0x72, 0xa0, 0x92, 0x33, 0x2b, 0x36, 0xf6, 0x3f, 0xe4, 0x23, 0x20, 0x31, 0xc7, 0x60, + 0x59, 0x5a, 0x34, 0x2d, 0x4b, 0xe5, 0x2e, 0x5f, 0x90, 0x63, 0x61, 0xfa, 0x07, 0x66, 0xbb, 0x99, + 0x52, 0xec, 0x9d, 0xee, 0x88, 0x65, 0x7f, 0xde, 0x02, 0x88, 0x63, 0x79, 0xf7, 0x90, 0x93, 0xef, + 0x25, 0x7a, 0x1d, 0xf0, 0x23, 0xbf, 0xe6, 0x37, 0x84, 0xdd, 0xf4, 0x91, 0xd8, 0xb8, 0xc5, 0xe1, + 0x07, 0xda, 0x6f, 0xac, 0xa8, 0x51, 0x59, 0x46, 0x0e, 0x2c, 0xc6, 0xe6, 0x56, 0x23, 0x6a, 0xe0, + 0x17, 0x2d, 0x38, 0x91, 0xe5, 0x45, 0x4d, 0x2f, 0x97, 0x5c, 0x3d, 0xa8, 0x9c, 0xe4, 0xd4, 0x68, + 0xde, 0x14, 0x70, 0xac, 0x28, 0x7a, 0x4e, 0x76, 0x78, 0xb8, 0x20, 0xda, 0xd7, 0x61, 
0xb4, 0x12, + 0x10, 0xed, 0x5c, 0x7e, 0x95, 0x47, 0xa3, 0xe0, 0xed, 0x79, 0xe6, 0xd0, 0x91, 0x28, 0xec, 0x2f, + 0x17, 0xe0, 0x04, 0xf7, 0x35, 0x99, 0xdb, 0xf5, 0xdd, 0x7a, 0xc5, 0xaf, 0x8b, 0xb7, 0x72, 0x6f, + 0xc0, 0x48, 0x4b, 0xd3, 0xe9, 0x76, 0x0a, 0x08, 0xab, 0xeb, 0x7e, 0x63, 0x2d, 0x94, 0x0e, 0xc5, + 0x06, 0x2f, 0x54, 0x87, 0x11, 0xb2, 0xeb, 0xd6, 0x94, 0xc3, 0x42, 0xe1, 0xd0, 0x67, 0xa4, 0xaa, + 0x65, 0x49, 0xe3, 0x83, 0x0d, 0xae, 0x0f, 0x20, 0x05, 0xb9, 0xfd, 0x63, 0x16, 0x3c, 0x94, 0x13, + 0x3e, 0x96, 0x56, 0x77, 0x9b, 0x79, 0xf5, 0x88, 0x69, 0xab, 0xaa, 0xe3, 0xbe, 0x3e, 0x58, 0x60, + 0xd1, 0x47, 0x01, 0xb8, 0xaf, 0x0e, 0xf1, 0x6a, 0x5d, 0xe3, 0x6c, 0x1a, 0x21, 0x02, 0xb5, 0x68, + 0x6f, 0xb2, 0x3c, 0xd6, 0x78, 0xd9, 0x5f, 0xec, 0x83, 0x7e, 0xe6, 0x1b, 0x82, 0x2a, 0x30, 0xb8, + 0xcd, 0x13, 0x02, 0x75, 0x1c, 0x37, 0x4a, 0x2b, 0x73, 0x0c, 0xc5, 0xe3, 0xa6, 0x41, 0xb1, 0x64, + 0x83, 0x56, 0x61, 0x8a, 0xe7, 0x65, 0x6a, 0x2c, 0x92, 0x86, 0xb3, 0x27, 0xd5, 0xa5, 0x3c, 0x89, + 0xb0, 0x52, 0x1b, 0xaf, 0xa4, 0x49, 0x70, 0x56, 0x39, 0xf4, 0x2a, 0x8c, 0xd1, 0xeb, 0xab, 0xdf, + 0x8e, 0x24, 0x27, 0x9e, 0x91, 0x49, 0x49, 0xf4, 0xeb, 0x06, 0x16, 0x27, 0xa8, 0xd1, 0x2b, 0x30, + 0xda, 0x4a, 0x29, 0x86, 0xfb, 0x63, 0x0d, 0x8a, 0xa9, 0x0c, 0x36, 0x69, 0x99, 0x23, 0x75, 0x9b, + 0xb9, 0x8d, 0xaf, 0x6f, 0x07, 0x24, 0xdc, 0xf6, 0x1b, 0x75, 0x26, 0x39, 0xf6, 0x6b, 0x8e, 0xd4, + 0x09, 0x3c, 0x4e, 0x95, 0xa0, 0x5c, 0x36, 0x1d, 0xb7, 0xd1, 0x0e, 0x48, 0xcc, 0x65, 0xc0, 0xe4, + 0xb2, 0x9c, 0xc0, 0xe3, 0x54, 0x89, 0xee, 0x1a, 0xef, 0xc1, 0xa3, 0xd1, 0x78, 0xdb, 0x3f, 0x53, + 0x00, 0x63, 0x68, 0xbf, 0x73, 0x33, 0x45, 0xd1, 0x2f, 0xdb, 0x0a, 0x5a, 0x35, 0xe1, 0x07, 0x95, + 0xf9, 0x65, 0x71, 0x02, 0x58, 0xfe, 0x65, 0xf4, 0x3f, 0x66, 0xa5, 0xe8, 0x1a, 0x3f, 0x59, 0x09, + 0x7c, 0x7a, 0xc8, 0xc9, 0x78, 0x65, 0xea, 0xbd, 0xc2, 0xa0, 0x7c, 0xcb, 0xdd, 0x21, 0xb2, 0xa7, + 0xf0, 0xe8, 0xe6, 0x1c, 0x0c, 0x97, 0xa1, 0xaa, 0x08, 0xaa, 0x20, 0xb9, 0xa0, 0x4b, 0x30, 0x2c, + 0xd2, 0xff, 0x30, 0xb7, 0x7a, 0xbe, 0x98, 0x98, 0x8b, 0xd3, 0x62, 0x0c, 0xc6, 0x3a, 0x8d, 0xfd, + 0x83, 0x05, 0x98, 0xca, 0x78, 0x17, 0xc5, 0x8f, 0x91, 0x2d, 0x37, 0x8c, 0x54, 0x8e, 0x59, 0xed, + 0x18, 0xe1, 0x70, 0xac, 0x28, 0xe8, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0xde, 0x1d, 0x08, + 0xec, 0x21, 0xb3, 0xb5, 0x9e, 0x83, 0xbe, 0x76, 0x48, 0x64, 0x4c, 0x5e, 0x75, 0x6c, 0x33, 0x73, + 0x30, 0xc3, 0xd0, 0x1b, 0xd8, 0x96, 0xb2, 0xac, 0x6a, 0x37, 0x30, 0x6e, 0x5b, 0xe5, 0x38, 0xda, + 0xb8, 0x88, 0x78, 0x8e, 0x17, 0x89, 0x7b, 0x5a, 0x1c, 0x5c, 0x92, 0x41, 0xb1, 0xc0, 0xda, 0x5f, + 0x28, 0xc2, 0xe9, 0xdc, 0x97, 0x92, 0xb4, 0xe9, 0x4d, 0xdf, 0x73, 0x23, 0x5f, 0xf9, 0x8e, 0xf1, + 0x80, 0x92, 0xa4, 0xb5, 0xbd, 0x2a, 0xe0, 0x58, 0x51, 0xa0, 0xf3, 0xd0, 0xcf, 0x94, 0xc9, 0xa9, + 0x6c, 0xbb, 0xf3, 0x8b, 0x3c, 0xc2, 0x18, 0x47, 0xf7, 0x9c, 0x20, 0xfd, 0x31, 0x2a, 0xc1, 0xf8, + 0x8d, 0xe4, 0x81, 0x42, 0x9b, 0xeb, 0xfb, 0x0d, 0xcc, 0x90, 0xe8, 0x09, 0xd1, 0x5f, 0x09, 0x67, + 0x29, 0xec, 0xd4, 0xfd, 0x50, 0xeb, 0xb4, 0xa7, 0x60, 0x70, 0x87, 0xec, 0x05, 0xae, 0xb7, 0x95, + 0x74, 0xa2, 0xbb, 0xca, 0xc1, 0x58, 0xe2, 0xcd, 0xf4, 0x90, 0x83, 0x47, 0x9d, 0xd9, 0x7c, 0xa8, + 0xab, 0x78, 0xf2, 0xc3, 0x45, 0x18, 0xc7, 0xf3, 0x8b, 0xef, 0x0e, 0xc4, 0x8d, 0xf4, 0x40, 0x1c, + 0x75, 0x66, 0xf3, 0xee, 0xa3, 0xf1, 0x0b, 0x16, 0x8c, 0xb3, 0x24, 0x44, 0x22, 0x1e, 0x82, 0xeb, + 0x7b, 0xc7, 0x70, 0x15, 0x78, 0x0c, 0xfa, 0x03, 0x5a, 0x69, 0x32, 0xcd, 0x2e, 0x6b, 0x09, 0xe6, + 0x38, 0xf4, 0x08, 0xf4, 0xb1, 0x26, 0xd0, 0xc1, 0x1b, 0xe1, 0x5b, 0xf0, 0xa2, 0x13, 0x39, 0x98, + 0x41, 0x59, 
0x7c, 0x2d, 0x4c, 0x5a, 0x0d, 0x97, 0x37, 0x3a, 0x36, 0xf5, 0xbf, 0x33, 0x62, 0x28, + 0x64, 0x36, 0xed, 0xed, 0xc5, 0xd7, 0xca, 0x66, 0xd9, 0xf9, 0x9a, 0xfd, 0xc7, 0x05, 0x38, 0x9b, + 0x59, 0xae, 0xe7, 0xf8, 0x5a, 0x9d, 0x4b, 0x3f, 0xc8, 0x34, 0x33, 0xc5, 0x63, 0x74, 0x51, 0xee, + 0xeb, 0x55, 0xfa, 0xef, 0xef, 0x21, 0xec, 0x55, 0x66, 0x97, 0xbd, 0x43, 0xc2, 0x5e, 0x65, 0xb6, + 0x2d, 0x47, 0x4d, 0xf0, 0xe7, 0x85, 0x9c, 0x6f, 0x61, 0x0a, 0x83, 0x0b, 0x74, 0x9f, 0x61, 0xc8, + 0x50, 0x5e, 0xc2, 0xf9, 0x1e, 0xc3, 0x61, 0x58, 0x61, 0xd1, 0x1c, 0x8c, 0x37, 0x5d, 0x8f, 0x6e, + 0x3e, 0x7b, 0xa6, 0x28, 0xae, 0x6c, 0x00, 0xab, 0x26, 0x1a, 0x27, 0xe9, 0x91, 0xab, 0x85, 0xc4, + 0xe2, 0x5f, 0xf7, 0xca, 0xa1, 0x56, 0xdd, 0xac, 0xe9, 0x06, 0xa1, 0x7a, 0x31, 0x23, 0x3c, 0xd6, + 0xaa, 0xa6, 0x27, 0x2a, 0xf6, 0xae, 0x27, 0x1a, 0xc9, 0xd6, 0x11, 0xcd, 0xbc, 0x02, 0xa3, 0xf7, + 0x6d, 0x53, 0xb0, 0xbf, 0x5e, 0x84, 0x87, 0x3b, 0x2c, 0x7b, 0xbe, 0xd7, 0x1b, 0x63, 0xa0, 0xed, + 0xf5, 0xa9, 0x71, 0xa8, 0xc0, 0x89, 0xcd, 0x76, 0xa3, 0xb1, 0xc7, 0x5e, 0xee, 0x90, 0xba, 0xa4, + 0x10, 0x32, 0xa5, 0x54, 0x8e, 0x9c, 0x58, 0xce, 0xa0, 0xc1, 0x99, 0x25, 0xe9, 0x15, 0x8b, 0x9e, + 0x24, 0x7b, 0x8a, 0x55, 0xe2, 0x8a, 0x85, 0x75, 0x24, 0x36, 0x69, 0xd1, 0x65, 0x98, 0x74, 0x76, + 0x1d, 0x97, 0xc7, 0x15, 0x97, 0x0c, 0xf8, 0x1d, 0x4b, 0xa9, 0x82, 0xe7, 0x92, 0x04, 0x38, 0x5d, + 0x06, 0xbd, 0x06, 0xc8, 0xdf, 0x60, 0xfe, 0xfd, 0xf5, 0xcb, 0xc4, 0x13, 0xd6, 0x6a, 0x36, 0x76, + 0xc5, 0x78, 0x4b, 0xb8, 0x9e, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0xf1, 0x9f, 0x06, 0xf2, 0xe3, 0x3f, + 0x75, 0xde, 0x17, 0xbb, 0x66, 0x38, 0xba, 0x04, 0xa3, 0x87, 0xf4, 0x5a, 0xb5, 0xff, 0x83, 0x45, + 0x4f, 0x3c, 0x5e, 0xc6, 0x0c, 0xae, 0xfa, 0x0a, 0x73, 0xab, 0xe5, 0x9a, 0x65, 0x2d, 0xc0, 0xce, + 0x49, 0xcd, 0xad, 0x36, 0x46, 0x62, 0x93, 0x96, 0xcf, 0x21, 0xcd, 0x1d, 0xd6, 0xb8, 0x15, 0x88, + 0x08, 0x70, 0x8a, 0x02, 0x7d, 0x0c, 0x06, 0xeb, 0xee, 0xae, 0x1b, 0x0a, 0xe5, 0xd8, 0xa1, 0x8d, + 0x58, 0xf1, 0xd6, 0xb9, 0xc8, 0xd9, 0x60, 0xc9, 0xcf, 0xfe, 0xe1, 0x42, 0xdc, 0x27, 0xaf, 0xb7, + 0xfd, 0xc8, 0x39, 0x86, 0x93, 0xfc, 0xb2, 0x71, 0x92, 0x3f, 0xd1, 0x29, 0x0c, 0x1e, 0x6b, 0x52, + 0xee, 0x09, 0x7e, 0x3d, 0x71, 0x82, 0x3f, 0xd9, 0x9d, 0x55, 0xe7, 0x93, 0xfb, 0x1f, 0x59, 0x30, + 0x69, 0xd0, 0x1f, 0xc3, 0x01, 0xb2, 0x6c, 0x1e, 0x20, 0x8f, 0x76, 0xfd, 0x86, 0x9c, 0x83, 0xe3, + 0xfb, 0x8a, 0x89, 0xb6, 0xb3, 0x03, 0xe3, 0x2d, 0xe8, 0xdb, 0x76, 0x82, 0x7a, 0xa7, 0xb4, 0x1f, + 0xa9, 0x42, 0xb3, 0x57, 0x9c, 0x40, 0x58, 0xf8, 0x9f, 0x91, 0xbd, 0x4e, 0x41, 0x5d, 0xad, 0xfb, + 0xac, 0x2a, 0xf4, 0x12, 0x0c, 0x84, 0x35, 0xbf, 0xa5, 0x9e, 0xfa, 0x9c, 0x63, 0x1d, 0xcd, 0x20, + 0x07, 0xfb, 0x65, 0x64, 0x56, 0x47, 0xc1, 0x58, 0xd0, 0xa3, 0x37, 0x60, 0x94, 0xfd, 0x52, 0xee, + 0x76, 0xc5, 0x7c, 0x0d, 0x46, 0x55, 0x27, 0xe4, 0xbe, 0xa8, 0x06, 0x08, 0x9b, 0xac, 0x66, 0xb6, + 0xa0, 0xa4, 0x3e, 0xeb, 0x81, 0x5a, 0x89, 0xff, 0x6d, 0x11, 0xa6, 0x32, 0xe6, 0x1c, 0x0a, 0x8d, + 0x91, 0xb8, 0xd4, 0xe3, 0x54, 0x7d, 0x9b, 0x63, 0x11, 0xb2, 0x0b, 0x54, 0x5d, 0xcc, 0xad, 0x9e, + 0x2b, 0xbd, 0x11, 0x92, 0x64, 0xa5, 0x14, 0xd4, 0xbd, 0x52, 0x5a, 0xd9, 0xb1, 0x75, 0x35, 0xad, + 0x48, 0xb5, 0xf4, 0x81, 0x8e, 0xe9, 0xaf, 0xf5, 0xc1, 0x89, 0xac, 0xc8, 0x9c, 0xe8, 0x33, 0x89, + 0xa4, 0xb3, 0x2f, 0xf4, 0x1a, 0xd3, 0x93, 0x67, 0xa2, 0x15, 0x11, 0x03, 0x67, 0xcd, 0x34, 0xb4, + 0x5d, 0xbb, 0x59, 0xd4, 0xc9, 0x62, 0x96, 0x04, 0x3c, 0x59, 0xb0, 0xdc, 0x3e, 0xde, 0xdf, 0x73, + 0x03, 0x44, 0x96, 0xe1, 0x30, 0xe1, 0xca, 0x23, 0xc1, 0xdd, 0x5d, 0x79, 0x64, 0xcd, 0x68, 0x05, + 0x06, 0x6a, 0xdc, 0x47, 0xa4, 0xd8, 
0x7d, 0x0b, 0xe3, 0x0e, 0x22, 0x6a, 0x03, 0x16, 0x8e, 0x21, + 0x82, 0xc1, 0x8c, 0x0b, 0xc3, 0x5a, 0xc7, 0x3c, 0xd0, 0xc9, 0xb3, 0x43, 0x0f, 0x3e, 0xad, 0x0b, + 0x1e, 0xe8, 0x04, 0xfa, 0x31, 0x0b, 0x12, 0x0f, 0x45, 0x94, 0x52, 0xce, 0xca, 0x55, 0xca, 0x9d, + 0x83, 0xbe, 0xc0, 0x6f, 0x90, 0x64, 0xa2, 0x57, 0xec, 0x37, 0x08, 0x66, 0x18, 0x4a, 0x11, 0xc5, + 0xaa, 0x96, 0x11, 0xfd, 0x1a, 0x29, 0x2e, 0x88, 0x8f, 0x41, 0x7f, 0x83, 0xec, 0x92, 0x46, 0x32, + 0x1f, 0xd7, 0x35, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x42, 0x1f, 0x9c, 0xe9, 0x18, 0x40, 0x88, 0x5e, + 0xc6, 0xb6, 0x9c, 0x88, 0xdc, 0x76, 0xf6, 0x92, 0x89, 0x73, 0x2e, 0x73, 0x30, 0x96, 0x78, 0xf6, + 0x6a, 0x91, 0xc7, 0xbf, 0x4f, 0xa8, 0x30, 0x45, 0xd8, 0x7b, 0x81, 0x35, 0x55, 0x62, 0xc5, 0xa3, + 0x50, 0x89, 0x3d, 0x07, 0x10, 0x86, 0x0d, 0xee, 0x4e, 0x57, 0x17, 0xcf, 0x21, 0xe3, 0x3c, 0x09, + 0xd5, 0x6b, 0x02, 0x83, 0x35, 0x2a, 0xb4, 0x08, 0x13, 0xad, 0xc0, 0x8f, 0xb8, 0x46, 0x78, 0x91, + 0x7b, 0x9c, 0xf6, 0x9b, 0xb1, 0x5b, 0x2a, 0x09, 0x3c, 0x4e, 0x95, 0x40, 0x2f, 0xc2, 0xb0, 0x88, + 0xe7, 0x52, 0xf1, 0xfd, 0x86, 0x50, 0x42, 0x29, 0x27, 0xcc, 0x6a, 0x8c, 0xc2, 0x3a, 0x9d, 0x56, + 0x8c, 0xa9, 0x99, 0x07, 0x33, 0x8b, 0x71, 0x55, 0xb3, 0x46, 0x97, 0x08, 0xf8, 0x3b, 0xd4, 0x53, + 0xc0, 0xdf, 0x58, 0x2d, 0x57, 0xea, 0xd9, 0xea, 0x09, 0x5d, 0x15, 0x59, 0x5f, 0xe9, 0x83, 0x29, + 0x31, 0x71, 0x1e, 0xf4, 0x74, 0xb9, 0x91, 0x9e, 0x2e, 0x47, 0xa1, 0xb8, 0x7b, 0x77, 0xce, 0x1c, + 0xf7, 0x9c, 0xf9, 0x11, 0x0b, 0x4c, 0x49, 0x0d, 0xfd, 0x7f, 0xb9, 0x99, 0xc7, 0x5e, 0xcc, 0x95, + 0xfc, 0x94, 0xc3, 0xe1, 0xdb, 0xcc, 0x41, 0x66, 0xff, 0x3b, 0x0b, 0x1e, 0xed, 0xca, 0x11, 0x2d, + 0x41, 0x89, 0x89, 0x93, 0xda, 0x45, 0xef, 0x49, 0xe5, 0x91, 0x2e, 0x11, 0x39, 0xd2, 0x6d, 0x5c, + 0x12, 0x2d, 0xa5, 0x52, 0xbc, 0x3d, 0x95, 0x91, 0xe2, 0xed, 0xa4, 0xd1, 0x3d, 0xf7, 0x99, 0xe3, + 0xed, 0x87, 0xe8, 0x89, 0x63, 0xbc, 0x06, 0x43, 0xef, 0x37, 0x94, 0x8e, 0x76, 0x42, 0xe9, 0x88, + 0x4c, 0x6a, 0xed, 0x0c, 0xf9, 0x08, 0x4c, 0xb0, 0x40, 0x6f, 0xec, 0x7d, 0x84, 0x78, 0xa7, 0x56, + 0x88, 0x7d, 0xa0, 0xaf, 0x25, 0x70, 0x38, 0x45, 0x6d, 0xff, 0x51, 0x11, 0x06, 0xf8, 0xf2, 0x3b, + 0x86, 0xeb, 0xe5, 0xd3, 0x50, 0x72, 0x9b, 0xcd, 0x36, 0xcf, 0xda, 0xd5, 0x1f, 0x7b, 0xd4, 0xae, + 0x48, 0x20, 0x8e, 0xf1, 0x68, 0x59, 0xe8, 0xbb, 0x3b, 0xc4, 0x92, 0xe5, 0x0d, 0x9f, 0x5d, 0x74, + 0x22, 0x87, 0xcb, 0x4a, 0xea, 0x9c, 0x8d, 0x35, 0xe3, 0xe8, 0x93, 0x00, 0x61, 0x14, 0xb8, 0xde, + 0x16, 0x85, 0x89, 0x10, 0xd6, 0xef, 0xed, 0xc0, 0xad, 0xaa, 0x88, 0x39, 0xcf, 0x78, 0xcf, 0x51, + 0x08, 0xac, 0x71, 0x44, 0xb3, 0xc6, 0x49, 0x3f, 0x93, 0x18, 0x3b, 0xe0, 0x5c, 0xe3, 0x31, 0x9b, + 0xf9, 0x00, 0x94, 0x14, 0xf3, 0x6e, 0xda, 0xaf, 0x11, 0x5d, 0x2c, 0xfa, 0x30, 0x8c, 0x27, 0xda, + 0x76, 0x28, 0xe5, 0xd9, 0x2f, 0x5a, 0x30, 0xce, 0x1b, 0xb3, 0xe4, 0xed, 0x8a, 0xd3, 0xe0, 0x2e, + 0x9c, 0x68, 0x64, 0xec, 0xca, 0x62, 0xf8, 0x7b, 0xdf, 0xc5, 0x95, 0xb2, 0x2c, 0x0b, 0x8b, 0x33, + 0xeb, 0x40, 0x17, 0xe8, 0x8a, 0xa3, 0xbb, 0xae, 0xd3, 0x10, 0xcf, 0xf2, 0x47, 0xf8, 0x6a, 0xe3, + 0x30, 0xac, 0xb0, 0xf6, 0xef, 0x59, 0x30, 0xc9, 0x5b, 0x7e, 0x95, 0xec, 0xa9, 0xbd, 0xe9, 0x5b, + 0xd9, 0x76, 0x91, 0x2f, 0xb2, 0x90, 0x93, 0x2f, 0x52, 0xff, 0xb4, 0x62, 0xc7, 0x4f, 0xfb, 0xb2, + 0x05, 0x62, 0x86, 0x1c, 0x83, 0x3e, 0xe3, 0xbb, 0x4c, 0x7d, 0xc6, 0x4c, 0xfe, 0x22, 0xc8, 0x51, + 0x64, 0xfc, 0x99, 0x05, 0x13, 0x9c, 0x20, 0xb6, 0xd5, 0x7f, 0x4b, 0xc7, 0xa1, 0x97, 0xac, 0xf2, + 0x57, 0xc9, 0xde, 0xba, 0x5f, 0x71, 0xa2, 0xed, 0xec, 0x8f, 0x32, 0x06, 0xab, 0xaf, 0xe3, 0x60, + 0xd5, 0xe5, 0x02, 0x32, 0xd2, 0x29, 0x75, 0x79, 0x5c, 0x7f, 
0xd8, 0x74, 0x4a, 0xf6, 0x37, 0x2d, + 0x40, 0xbc, 0x1a, 0x43, 0x70, 0xa3, 0xe2, 0x10, 0x83, 0x6a, 0x07, 0x5d, 0xbc, 0x35, 0x29, 0x0c, + 0xd6, 0xa8, 0x8e, 0xa4, 0x7b, 0x12, 0x0e, 0x17, 0xc5, 0xee, 0x0e, 0x17, 0x87, 0xe8, 0xd1, 0x7f, + 0x31, 0x00, 0xc9, 0x17, 0x71, 0xe8, 0x26, 0x8c, 0xd4, 0x9c, 0x96, 0xb3, 0xe1, 0x36, 0xdc, 0xc8, + 0x25, 0x61, 0x27, 0x6f, 0xac, 0x05, 0x8d, 0x4e, 0x98, 0xc8, 0x35, 0x08, 0x36, 0xf8, 0xa0, 0x59, + 0x80, 0x56, 0xe0, 0xee, 0xba, 0x0d, 0xb2, 0xc5, 0xd4, 0x2e, 0x2c, 0x10, 0x08, 0x77, 0x0d, 0x93, + 0x50, 0xac, 0x51, 0x64, 0x84, 0x1f, 0x28, 0x3e, 0xe0, 0xf0, 0x03, 0x70, 0x6c, 0xe1, 0x07, 0xfa, + 0x0e, 0x15, 0x7e, 0x60, 0xe8, 0xd0, 0xe1, 0x07, 0xfa, 0x7b, 0x0a, 0x3f, 0x80, 0xe1, 0x94, 0x94, + 0x3d, 0xe9, 0xff, 0x65, 0xb7, 0x41, 0xc4, 0x85, 0x83, 0x47, 0x2f, 0x99, 0xb9, 0xb7, 0x5f, 0x3e, + 0x85, 0x33, 0x29, 0x70, 0x4e, 0x49, 0xf4, 0x51, 0x98, 0x76, 0x1a, 0x0d, 0xff, 0xb6, 0x1a, 0xd4, + 0xa5, 0xb0, 0xe6, 0x34, 0xb8, 0x09, 0x64, 0x90, 0x71, 0x7d, 0xe4, 0xde, 0x7e, 0x79, 0x7a, 0x2e, + 0x87, 0x06, 0xe7, 0x96, 0x46, 0x1f, 0x82, 0x52, 0x2b, 0xf0, 0x6b, 0xab, 0xda, 0xb3, 0xdd, 0xb3, + 0xb4, 0x03, 0x2b, 0x12, 0x78, 0xb0, 0x5f, 0x1e, 0x55, 0x7f, 0xd8, 0x81, 0x1f, 0x17, 0xc8, 0x88, + 0x27, 0x30, 0x7c, 0xa4, 0xf1, 0x04, 0x76, 0x60, 0xaa, 0x4a, 0x02, 0xd7, 0x69, 0xb8, 0x77, 0xa9, + 0xbc, 0x2c, 0xf7, 0xa7, 0x75, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xa7, 0xf8, 0xae, 0x5a, 0x5e, 0x1b, + 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x65, 0xc1, 0xa0, 0x78, 0x01, 0x77, 0x0c, 0x52, 0xe3, 0x9c, + 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, 0x25, 0x61, 0x8e, 0x78, 0xb4, + 0x13, 0x93, 0xce, 0x86, 0x88, 0xbf, 0x5e, 0xa4, 0xd2, 0xbb, 0xf1, 0x16, 0xfb, 0xc1, 0x77, 0xc1, + 0x1a, 0x0c, 0x86, 0xe2, 0x2d, 0x70, 0x21, 0xff, 0x31, 0x46, 0x72, 0x10, 0x63, 0x2f, 0x3a, 0xf1, + 0xfa, 0x57, 0x32, 0xc9, 0x7c, 0x64, 0x5c, 0x7c, 0x80, 0x8f, 0x8c, 0xbb, 0xbd, 0x56, 0xef, 0x3b, + 0x8a, 0xd7, 0xea, 0xf6, 0xd7, 0xd8, 0xc9, 0xa9, 0xc3, 0x8f, 0x41, 0xa8, 0xba, 0x6c, 0x9e, 0xb1, + 0x76, 0x87, 0x99, 0x25, 0x1a, 0x95, 0x23, 0x5c, 0xfd, 0xbc, 0x05, 0x67, 0x32, 0xbe, 0x4a, 0x93, + 0xb4, 0x9e, 0x81, 0x21, 0xa7, 0x5d, 0x77, 0xd5, 0x5a, 0xd6, 0x4c, 0x93, 0x73, 0x02, 0x8e, 0x15, + 0x05, 0x5a, 0x80, 0x49, 0x72, 0xa7, 0xe5, 0x72, 0x43, 0xae, 0xee, 0x7c, 0x5c, 0xe4, 0xcf, 0x26, + 0x97, 0x92, 0x48, 0x9c, 0xa6, 0x57, 0x71, 0x8d, 0x8a, 0xb9, 0x71, 0x8d, 0xfe, 0x8e, 0x05, 0xc3, + 0xea, 0x35, 0xec, 0x03, 0xef, 0xed, 0x8f, 0x98, 0xbd, 0xfd, 0x70, 0x87, 0xde, 0xce, 0xe9, 0xe6, + 0xdf, 0x29, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x41, 0x82, 0xbb, 0xff, 0x87, 0x13, 0x97, 0x60, + 0xd8, 0x69, 0xb5, 0x24, 0x42, 0x7a, 0xc0, 0xb1, 0x68, 0xdd, 0x31, 0x18, 0xeb, 0x34, 0xea, 0x1d, + 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x2d, 0x12, 0x51, 0x98, 0x70, 0xd8, 0xcd, + 0xdf, 0x6f, 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, 0xd9, 0x15, 0x2f, 0xba, 0x1e, + 0xf0, 0x2b, 0xa4, 0x16, 0x19, 0x4c, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, 0xf9, 0x81, 0xd5, 0xd1, 0x6f, + 0xba, 0x52, 0xac, 0x09, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd8, 0xe9, 0xc3, 0xfa, 0xf4, 0x70, 0x51, + 0xb1, 0x7e, 0x6a, 0x44, 0x8d, 0x06, 0x33, 0x8a, 0x2e, 0xea, 0xb1, 0xb7, 0x3a, 0x6f, 0xf6, 0xb4, + 0x62, 0xfd, 0x41, 0x62, 0x1c, 0xa0, 0x0b, 0x7d, 0x3c, 0xe5, 0x1e, 0xf3, 0x6c, 0x97, 0x53, 0xe3, + 0x10, 0x0e, 0x31, 0x2c, 0x75, 0x0f, 0x4b, 0x6c, 0xb2, 0x52, 0x11, 0xeb, 0x42, 0x4b, 0xdd, 0x23, + 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa3, 0x38, 0x84, 0xad, 0xa2, 0x0e, 0xb1, + 0x46, 0x81, 0x2e, 0x0a, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x9c, 0x50, 0x28, 0xc8, 0xee, 
0xd2, 0xb4, + 0x40, 0x97, 0x60, 0x58, 0x25, 0x6a, 0xaf, 0xf0, 0xa4, 0x59, 0x62, 0x9a, 0x2d, 0xc5, 0x60, 0xac, + 0xd3, 0xa0, 0x75, 0x18, 0x0f, 0xb9, 0x9e, 0x4d, 0xc5, 0x15, 0xe7, 0xfa, 0xca, 0xf7, 0xaa, 0x77, + 0xc8, 0x26, 0xfa, 0x80, 0x81, 0xf8, 0xee, 0x24, 0xa3, 0x33, 0x24, 0x59, 0xa0, 0x57, 0x61, 0xac, + 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0xd6, 0x3f, 0x43, 0x66, 0xbe, 0xdf, 0x6b, 0x06, + 0x16, 0x27, 0xa8, 0xa9, 0xf0, 0xa6, 0x43, 0x44, 0x74, 0x31, 0xc7, 0xdb, 0x22, 0xa1, 0x48, 0xbb, + 0xcd, 0x84, 0xb7, 0x6b, 0x39, 0x34, 0x38, 0xb7, 0x34, 0x7a, 0x09, 0x46, 0xe4, 0xe7, 0x6b, 0xc1, + 0x4c, 0xe2, 0x27, 0x31, 0x1a, 0x0e, 0x1b, 0x94, 0x28, 0x84, 0x93, 0xf2, 0xff, 0x7a, 0xe0, 0x6c, + 0x6e, 0xba, 0x35, 0xf1, 0xc2, 0x9f, 0x3f, 0xbb, 0xfd, 0xb0, 0x7c, 0x1b, 0xba, 0x94, 0x45, 0x74, + 0xb0, 0x5f, 0x7e, 0x44, 0xf4, 0x5a, 0x26, 0x1e, 0x67, 0xf3, 0x46, 0xab, 0x30, 0xb5, 0x4d, 0x9c, + 0x46, 0xb4, 0xbd, 0xb0, 0x4d, 0x6a, 0x3b, 0x72, 0xc1, 0xb1, 0xf0, 0x28, 0xda, 0xd3, 0x91, 0x2b, + 0x69, 0x12, 0x9c, 0x55, 0x0e, 0xbd, 0x09, 0xd3, 0xad, 0xf6, 0x46, 0xc3, 0x0d, 0xb7, 0xd7, 0xfc, + 0x88, 0x39, 0x21, 0xa9, 0x9c, 0xef, 0x22, 0x8e, 0x8a, 0x0a, 0x40, 0x53, 0xc9, 0xa1, 0xc3, 0xb9, + 0x1c, 0xd0, 0x5d, 0x38, 0x99, 0x98, 0x08, 0x22, 0x92, 0xc4, 0x58, 0x7e, 0x56, 0x91, 0x6a, 0x56, + 0x01, 0x11, 0x94, 0x25, 0x0b, 0x85, 0xb3, 0xab, 0x40, 0x2f, 0x03, 0xb8, 0xad, 0x65, 0xa7, 0xe9, + 0x36, 0xe8, 0x55, 0x71, 0x8a, 0xcd, 0x11, 0x7a, 0x6d, 0x80, 0x95, 0x8a, 0x84, 0xd2, 0xbd, 0x59, + 0xfc, 0xdb, 0xc3, 0x1a, 0x35, 0xba, 0x06, 0x63, 0xe2, 0xdf, 0x9e, 0x18, 0x52, 0x1e, 0xd0, 0xe4, + 0x71, 0x16, 0x8d, 0xaa, 0xa2, 0x63, 0x0e, 0x52, 0x10, 0x9c, 0x28, 0x8b, 0xb6, 0xe0, 0x8c, 0xcc, + 0x10, 0xa7, 0xcf, 0x4f, 0x39, 0x06, 0x21, 0x4b, 0xe5, 0x31, 0xc4, 0x5f, 0xa5, 0xcc, 0x75, 0x22, + 0xc4, 0x9d, 0xf9, 0xd0, 0x73, 0x5d, 0x9f, 0xe6, 0xfc, 0xc9, 0xef, 0x49, 0xee, 0xe1, 0x44, 0xcf, + 0xf5, 0x6b, 0x49, 0x24, 0x4e, 0xd3, 0x23, 0x1f, 0x4e, 0xba, 0x5e, 0xd6, 0xac, 0x3e, 0xc5, 0x18, + 0x7d, 0x90, 0xbf, 0x76, 0xee, 0x3c, 0xa3, 0x33, 0xf1, 0x38, 0x9b, 0xef, 0xdb, 0xf3, 0xfb, 0xfb, + 0x5d, 0x8b, 0x96, 0xd6, 0xa4, 0x73, 0xf4, 0x29, 0x18, 0xd1, 0x3f, 0x4a, 0x48, 0x1a, 0xe7, 0xb3, + 0x85, 0x57, 0x6d, 0x4f, 0xe0, 0xb2, 0xbd, 0x5a, 0xf7, 0x3a, 0x0e, 0x1b, 0x1c, 0x51, 0x2d, 0x23, + 0x26, 0xc0, 0xc5, 0xde, 0x24, 0x99, 0xde, 0xdd, 0xde, 0x08, 0x64, 0x4f, 0x77, 0x74, 0x0d, 0x86, + 0x6a, 0x0d, 0x97, 0x78, 0xd1, 0x4a, 0xa5, 0x53, 0xd4, 0xc3, 0x05, 0x41, 0x23, 0xd6, 0x8f, 0xc8, + 0xca, 0xc1, 0x61, 0x58, 0x71, 0xb0, 0x7f, 0xa3, 0x00, 0xe5, 0x2e, 0x29, 0x5e, 0x12, 0x66, 0x28, + 0xab, 0x27, 0x33, 0xd4, 0x1c, 0x8c, 0xc7, 0xff, 0x74, 0x0d, 0x97, 0xf2, 0x64, 0xbd, 0x69, 0xa2, + 0x71, 0x92, 0xbe, 0xe7, 0x47, 0x09, 0xba, 0x25, 0xab, 0xaf, 0xeb, 0xb3, 0x1a, 0xc3, 0x82, 0xdd, + 0xdf, 0xfb, 0xb5, 0x37, 0xd7, 0x1a, 0x69, 0x7f, 0xad, 0x00, 0x27, 0x55, 0x17, 0x7e, 0xe7, 0x76, + 0xdc, 0x8d, 0x74, 0xc7, 0x1d, 0x81, 0x2d, 0xd7, 0xbe, 0x0e, 0x03, 0x3c, 0x8c, 0x63, 0x0f, 0xe2, + 0xf6, 0x63, 0x66, 0x70, 0x67, 0x25, 0xe1, 0x19, 0x01, 0x9e, 0x7f, 0xc0, 0x82, 0xf1, 0xc4, 0xeb, + 0x36, 0x84, 0xb5, 0x27, 0xd0, 0xf7, 0x23, 0x12, 0x67, 0x09, 0xdb, 0xe7, 0xa0, 0x6f, 0xdb, 0x0f, + 0xa3, 0xa4, 0xa3, 0xc7, 0x15, 0x3f, 0x8c, 0x30, 0xc3, 0xd8, 0xbf, 0x6f, 0x41, 0xff, 0xba, 0xe3, + 0x7a, 0x91, 0x34, 0x0a, 0x58, 0x39, 0x46, 0x81, 0x5e, 0xbe, 0x0b, 0xbd, 0x08, 0x03, 0x64, 0x73, + 0x93, 0xd4, 0x22, 0x31, 0xaa, 0x32, 0xf4, 0xc4, 0xc0, 0x12, 0x83, 0x52, 0xf9, 0x8f, 0x55, 0xc6, + 0xff, 0x62, 0x41, 0x8c, 0x6e, 0x41, 0x29, 0x72, 0x9b, 0x64, 0xae, 0x5e, 0x17, 0xa6, 0xf2, 0xfb, + 0x08, 0x9f, 
0xb1, 0x2e, 0x19, 0xe0, 0x98, 0x97, 0xfd, 0x85, 0x02, 0x40, 0x1c, 0xff, 0xaa, 0xdb, + 0x27, 0xce, 0xa7, 0x8c, 0xa8, 0xe7, 0x33, 0x8c, 0xa8, 0x28, 0x66, 0x98, 0x61, 0x41, 0x55, 0xdd, + 0x54, 0xec, 0xa9, 0x9b, 0xfa, 0x0e, 0xd3, 0x4d, 0x0b, 0x30, 0x19, 0xc7, 0xef, 0x32, 0xc3, 0x17, + 0xb2, 0xa3, 0x73, 0x3d, 0x89, 0xc4, 0x69, 0x7a, 0x9b, 0xc0, 0x39, 0x15, 0xc6, 0x48, 0x9c, 0x68, + 0xcc, 0x0f, 0x5c, 0x37, 0x4a, 0x77, 0xe9, 0xa7, 0xd8, 0x4a, 0x5c, 0xc8, 0xb5, 0x12, 0xff, 0xa4, + 0x05, 0x27, 0x92, 0xf5, 0xb0, 0x47, 0xd3, 0x9f, 0xb7, 0xe0, 0x24, 0xb3, 0x95, 0xb3, 0x5a, 0xd3, + 0x96, 0xf9, 0x17, 0x3a, 0x86, 0x66, 0xca, 0x69, 0x71, 0x1c, 0xe3, 0x64, 0x35, 0x8b, 0x35, 0xce, + 0xae, 0xd1, 0xfe, 0x9f, 0x7d, 0x30, 0x9d, 0x17, 0xd3, 0x89, 0x3d, 0x13, 0x71, 0xee, 0x54, 0x77, + 0xc8, 0x6d, 0xe1, 0x8c, 0x1f, 0x3f, 0x13, 0xe1, 0x60, 0x2c, 0xf1, 0xc9, 0xac, 0x1d, 0x85, 0x1e, + 0xb3, 0x76, 0x6c, 0xc3, 0xe4, 0xed, 0x6d, 0xe2, 0xdd, 0xf0, 0x42, 0x27, 0x72, 0xc3, 0x4d, 0x97, + 0xd9, 0x95, 0xf9, 0xbc, 0x91, 0xa9, 0x7e, 0x27, 0x6f, 0x25, 0x09, 0x0e, 0xf6, 0xcb, 0x67, 0x0c, + 0x40, 0xdc, 0x64, 0xbe, 0x91, 0xe0, 0x34, 0xd3, 0x74, 0xd2, 0x93, 0xbe, 0x07, 0x9c, 0xf4, 0xa4, + 0xe9, 0x0a, 0x6f, 0x14, 0xf9, 0x06, 0x80, 0xdd, 0x18, 0x57, 0x15, 0x14, 0x6b, 0x14, 0xe8, 0x13, + 0x80, 0xf4, 0xa4, 0x4e, 0x46, 0x48, 0xcd, 0x67, 0xef, 0xed, 0x97, 0xd1, 0x5a, 0x0a, 0x7b, 0xb0, + 0x5f, 0x9e, 0xa2, 0xd0, 0x15, 0x8f, 0xde, 0x3c, 0xe3, 0x38, 0x64, 0x19, 0x8c, 0xd0, 0x2d, 0x98, + 0xa0, 0x50, 0xb6, 0xa2, 0x64, 0xbc, 0x4e, 0x7e, 0x5b, 0x7c, 0xfa, 0xde, 0x7e, 0x79, 0x62, 0x2d, + 0x81, 0xcb, 0x63, 0x9d, 0x62, 0x82, 0x5e, 0x86, 0xb1, 0x78, 0x5e, 0x5d, 0x25, 0x7b, 0x3c, 0x3e, + 0x4e, 0x89, 0x2b, 0xbc, 0x57, 0x0d, 0x0c, 0x4e, 0x50, 0xda, 0x9f, 0xb7, 0xe0, 0x74, 0x6e, 0xe2, + 0x71, 0x74, 0x01, 0x86, 0x9c, 0x96, 0xcb, 0xcd, 0x17, 0xe2, 0xa8, 0x61, 0x6a, 0xb2, 0xca, 0x0a, + 0x37, 0x5e, 0x28, 0x2c, 0xdd, 0xe1, 0x77, 0x5c, 0xaf, 0x9e, 0xdc, 0xe1, 0xaf, 0xba, 0x5e, 0x1d, + 0x33, 0x8c, 0x3a, 0xb2, 0x8a, 0xb9, 0x4f, 0x11, 0xbe, 0x42, 0xd7, 0x6a, 0x46, 0x8a, 0xf2, 0xe3, + 0x6d, 0x06, 0x7a, 0x5a, 0x37, 0x35, 0x0a, 0xaf, 0xc2, 0x5c, 0x33, 0xe3, 0xf7, 0x5b, 0x20, 0x9e, + 0x2e, 0xf7, 0x70, 0x26, 0xbf, 0x01, 0x23, 0xbb, 0xe9, 0x84, 0x77, 0xe7, 0xf2, 0xdf, 0x72, 0x8b, + 0x40, 0xe1, 0x4a, 0xd0, 0x36, 0x92, 0xdb, 0x19, 0xbc, 0xec, 0x3a, 0x08, 0xec, 0x22, 0x61, 0x06, + 0x85, 0xee, 0xad, 0x79, 0x0e, 0xa0, 0xce, 0x68, 0x59, 0x16, 0xdc, 0x82, 0x29, 0x71, 0x2d, 0x2a, + 0x0c, 0xd6, 0xa8, 0xec, 0x7f, 0x55, 0x80, 0x61, 0x99, 0x60, 0xad, 0xed, 0xf5, 0xa2, 0xf6, 0x3b, + 0x54, 0xc6, 0x65, 0x74, 0x11, 0x4a, 0x4c, 0x2f, 0x5d, 0x89, 0xb5, 0xa5, 0x4a, 0x2b, 0xb4, 0x2a, + 0x11, 0x38, 0xa6, 0xa1, 0xbb, 0x63, 0xd8, 0xde, 0x60, 0xe4, 0x89, 0x87, 0xb6, 0x55, 0x0e, 0xc6, + 0x12, 0x8f, 0x3e, 0x0a, 0x13, 0xbc, 0x5c, 0xe0, 0xb7, 0x9c, 0x2d, 0x6e, 0xcb, 0xea, 0x57, 0xd1, + 0x4b, 0x26, 0x56, 0x13, 0xb8, 0x83, 0xfd, 0xf2, 0x89, 0x24, 0x8c, 0x19, 0x69, 0x53, 0x5c, 0x98, + 0xcb, 0x1a, 0xaf, 0x84, 0xee, 0xea, 0x29, 0x4f, 0xb7, 0x18, 0x85, 0x75, 0x3a, 0xfb, 0x53, 0x80, + 0xd2, 0xa9, 0xe6, 0xd0, 0x6b, 0xdc, 0xe5, 0xd9, 0x0d, 0x48, 0xbd, 0x93, 0xd1, 0x56, 0x8f, 0xd1, + 0x21, 0xdf, 0xc8, 0xf1, 0x52, 0x58, 0x95, 0xb7, 0xff, 0x52, 0x11, 0x26, 0x92, 0x51, 0x01, 0xd0, + 0x15, 0x18, 0xe0, 0x22, 0xa5, 0x60, 0xdf, 0xc1, 0x27, 0x48, 0x8b, 0x25, 0xc0, 0x0e, 0x57, 0x21, + 0x95, 0x8a, 0xf2, 0xe8, 0x4d, 0x18, 0xae, 0xfb, 0xb7, 0xbd, 0xdb, 0x4e, 0x50, 0x9f, 0xab, 0xac, + 0x88, 0xe9, 0x9c, 0xa9, 0xa8, 0x58, 0x8c, 0xc9, 0xf4, 0xf8, 0x04, 0xcc, 0xfe, 0x1d, 0xa3, 0xb0, + 0xce, 0x0e, 0xad, 0xb3, 0xfc, 0x14, 
0x9b, 0xee, 0xd6, 0xaa, 0xd3, 0xea, 0xf4, 0xfe, 0x65, 0x41, + 0x12, 0x69, 0x9c, 0x47, 0x45, 0x12, 0x0b, 0x8e, 0xc0, 0x31, 0x23, 0xf4, 0x19, 0x98, 0x0a, 0x73, + 0x4c, 0x27, 0x79, 0x99, 0x47, 0x3b, 0x59, 0x13, 0xe6, 0x1f, 0xba, 0xb7, 0x5f, 0x9e, 0xca, 0x32, + 0xb2, 0x64, 0x55, 0x63, 0x7f, 0xf1, 0x04, 0x18, 0x8b, 0xd8, 0x48, 0x44, 0x6d, 0x1d, 0x51, 0x22, + 0x6a, 0x0c, 0x43, 0xa4, 0xd9, 0x8a, 0xf6, 0x16, 0xdd, 0x40, 0x8c, 0x49, 0x26, 0xcf, 0x25, 0x41, + 0x93, 0xe6, 0x29, 0x31, 0x58, 0xf1, 0xc9, 0xce, 0x16, 0x5e, 0xfc, 0x16, 0x66, 0x0b, 0xef, 0x3b, + 0xc6, 0x6c, 0xe1, 0x6b, 0x30, 0xb8, 0xe5, 0x46, 0x98, 0xb4, 0x7c, 0x71, 0x99, 0xcb, 0x9c, 0x87, + 0x97, 0x39, 0x49, 0x3a, 0x2f, 0xad, 0x40, 0x60, 0xc9, 0x04, 0xbd, 0xa6, 0x56, 0xe0, 0x40, 0xbe, + 0xc2, 0x25, 0xed, 0xbc, 0x92, 0xb9, 0x06, 0x45, 0x4e, 0xf0, 0xc1, 0xfb, 0xcd, 0x09, 0xbe, 0x2c, + 0x33, 0x79, 0x0f, 0xe5, 0x3f, 0x56, 0x63, 0x89, 0xba, 0xbb, 0xe4, 0xef, 0xbe, 0xa9, 0x67, 0x3f, + 0x2f, 0xe5, 0xef, 0x04, 0x2a, 0xb1, 0x79, 0x8f, 0x39, 0xcf, 0xbf, 0xdf, 0x82, 0x93, 0xc9, 0xec, + 0xa4, 0xec, 0x4d, 0x85, 0xf0, 0xf3, 0x78, 0xb1, 0x97, 0x74, 0xb1, 0xac, 0x80, 0x51, 0x21, 0xd3, + 0x91, 0x66, 0x92, 0xe1, 0xec, 0xea, 0x68, 0x47, 0x07, 0x1b, 0x75, 0xe1, 0x6f, 0xf0, 0x58, 0x4e, + 0xf2, 0xf4, 0x0e, 0x29, 0xd3, 0xd7, 0x33, 0x12, 0x75, 0x3f, 0x9e, 0x97, 0xa8, 0xbb, 0xe7, 0xf4, + 0xdc, 0xaf, 0xa9, 0xb4, 0xe9, 0xa3, 0xf9, 0x53, 0x89, 0x27, 0x45, 0xef, 0x9a, 0x2c, 0xfd, 0x35, + 0x95, 0x2c, 0xbd, 0x43, 0x44, 0x6e, 0x9e, 0x0a, 0xbd, 0x6b, 0x8a, 0x74, 0x2d, 0xcd, 0xf9, 0xf8, + 0xd1, 0xa4, 0x39, 0x37, 0x8e, 0x1a, 0x9e, 0x69, 0xfb, 0xe9, 0x2e, 0x47, 0x8d, 0xc1, 0xb7, 0xf3, + 0x61, 0xc3, 0x53, 0xba, 0x4f, 0xde, 0x57, 0x4a, 0xf7, 0x9b, 0x7a, 0x8a, 0x74, 0xd4, 0x25, 0x07, + 0x38, 0x25, 0xea, 0x31, 0x31, 0xfa, 0x4d, 0xfd, 0x00, 0x9c, 0xca, 0xe7, 0xab, 0xce, 0xb9, 0x34, + 0xdf, 0xcc, 0x23, 0x30, 0x95, 0x70, 0xfd, 0xc4, 0xf1, 0x24, 0x5c, 0x3f, 0x79, 0xe4, 0x09, 0xd7, + 0x4f, 0x1d, 0x43, 0xc2, 0xf5, 0x87, 0x8e, 0x31, 0xe1, 0xfa, 0x4d, 0xe6, 0x1c, 0xc5, 0x03, 0x40, + 0x89, 0x08, 0xe2, 0x4f, 0xe5, 0xc4, 0x4f, 0x4b, 0x47, 0x89, 0xe2, 0x1f, 0xa7, 0x50, 0x38, 0x66, + 0x95, 0x91, 0xc8, 0x7d, 0xfa, 0x01, 0x24, 0x72, 0x5f, 0x8b, 0x13, 0xb9, 0x9f, 0xce, 0x1f, 0xea, + 0x8c, 0xe7, 0x34, 0x39, 0xe9, 0xdb, 0x6f, 0xea, 0x69, 0xd7, 0x1f, 0xee, 0x60, 0x05, 0xcb, 0x52, + 0x28, 0x77, 0x48, 0xb6, 0xfe, 0x2a, 0x4f, 0xb6, 0xfe, 0x48, 0xfe, 0x4e, 0x9e, 0x3c, 0xee, 0x8c, + 0x14, 0xeb, 0xb4, 0x5d, 0x2a, 0xf6, 0x2a, 0x8b, 0x95, 0x9e, 0xd3, 0x2e, 0x15, 0xbc, 0x35, 0xdd, + 0x2e, 0x85, 0xc2, 0x31, 0x2b, 0xfb, 0x07, 0x0b, 0x70, 0xb6, 0xf3, 0x7a, 0x8b, 0xb5, 0xe4, 0x95, + 0xd8, 0x21, 0x20, 0xa1, 0x25, 0xe7, 0x77, 0xb6, 0x98, 0xaa, 0xe7, 0x78, 0x90, 0x97, 0x61, 0x52, + 0xbd, 0xc3, 0x69, 0xb8, 0xb5, 0xbd, 0xb5, 0xf8, 0x9a, 0xac, 0x22, 0x27, 0x54, 0x93, 0x04, 0x38, + 0x5d, 0x06, 0xcd, 0xc1, 0xb8, 0x01, 0x5c, 0x59, 0x14, 0x77, 0xb3, 0x38, 0x3a, 0xb7, 0x89, 0xc6, + 0x49, 0x7a, 0xfb, 0x4b, 0x16, 0x3c, 0x94, 0x93, 0xa9, 0xb4, 0xe7, 0x70, 0x87, 0x9b, 0x30, 0xde, + 0x32, 0x8b, 0x76, 0x89, 0xd0, 0x6a, 0xe4, 0x43, 0x55, 0x6d, 0x4d, 0x20, 0x70, 0x92, 0xa9, 0xfd, + 0xb3, 0x05, 0x38, 0xd3, 0xd1, 0xb1, 0x14, 0x61, 0x38, 0xb5, 0xd5, 0x0c, 0x9d, 0x85, 0x80, 0xd4, + 0x89, 0x17, 0xb9, 0x4e, 0xa3, 0xda, 0x22, 0x35, 0xcd, 0xce, 0xc1, 0x3c, 0x34, 0x2f, 0xaf, 0x56, + 0xe7, 0xd2, 0x14, 0x38, 0xa7, 0x24, 0x5a, 0x06, 0x94, 0xc6, 0x88, 0x11, 0x66, 0x51, 0xf7, 0xd3, + 0xfc, 0x70, 0x46, 0x09, 0xf4, 0x01, 0x18, 0x55, 0x0e, 0xab, 0xda, 0x88, 0xb3, 0x8d, 0x1d, 0xeb, + 0x08, 0x6c, 0xd2, 0xa1, 0x4b, 0x3c, 0x6d, 0x83, 0x48, 0xf0, 
0x21, 0x8c, 0x22, 0xe3, 0x32, 0x27, + 0x83, 0x00, 0x63, 0x9d, 0x66, 0xfe, 0xa5, 0xdf, 0xfc, 0xc6, 0xd9, 0xf7, 0xfc, 0xf6, 0x37, 0xce, + 0xbe, 0xe7, 0xf7, 0xbe, 0x71, 0xf6, 0x3d, 0xdf, 0x7d, 0xef, 0xac, 0xf5, 0x9b, 0xf7, 0xce, 0x5a, + 0xbf, 0x7d, 0xef, 0xac, 0xf5, 0x7b, 0xf7, 0xce, 0x5a, 0x7f, 0x70, 0xef, 0xac, 0xf5, 0x85, 0x3f, + 0x3c, 0xfb, 0x9e, 0x37, 0x50, 0x1c, 0x40, 0xf4, 0x22, 0x1d, 0x9d, 0x8b, 0xbb, 0x97, 0xfe, 0x5f, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x6c, 0x51, 0x7f, 0x2c, 0x10, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -8714,6 +8752,22 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ResizePolicy) > 0 { + for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } if m.StartupProbe != nil { { size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -9022,6 +9076,39 @@ func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerResizePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResizePolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResizePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RestartPolicy) + copy(dAtA[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9231,6 +9318,47 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if len(m.AllocatedResources) > 0 { + keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources)) + for k := range m.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- { + v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatedResources[iNdEx]) + copy(dAtA[i:], keysForAllocatedResources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResources[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } if m.Started != nil { i-- if *m.Started { @@ -9977,6 +10105,22 @@ func (m *EphemeralContainerCommon) 
MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if len(m.ResizePolicy) > 0 { + for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } if m.StartupProbe != nil { { size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -15792,6 +15936,11 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.Resize) + copy(dAtA[i:], m.Resize) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resize))) + i-- + dAtA[i] = 0x72 if len(m.EphemeralContainerStatuses) > 0 { for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { { @@ -20815,6 +20964,12 @@ func (m *Container) Size() (n int) { l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ResizePolicy) > 0 { + for _, e := range m.ResizePolicy { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -20851,6 +21006,19 @@ func (m *ContainerPort) Size() (n int) { return n } +func (m *ContainerResizePolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ContainerState) Size() (n int) { if m == nil { return 0 @@ -20940,6 +21108,19 @@ func (m *ContainerStatus) Size() (n int) { if m.Started != nil { n += 2 } + if len(m.AllocatedResources) > 0 { + for k, v := range m.AllocatedResources { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -21282,6 +21463,12 @@ func (m *EphemeralContainerCommon) Size() (n int) { l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ResizePolicy) > 0 { + for _, e := range m.ResizePolicy { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -23363,6 +23550,8 @@ func (m *PodStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.Resize) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -25367,6 +25556,11 @@ func (this *Container) String() string { repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," } repeatedStringForVolumeDevices += "}" + repeatedStringForResizePolicy := "[]ContainerResizePolicy{" + for _, f := range this.ResizePolicy { + repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForResizePolicy += "}" s := strings.Join([]string{`&Container{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -25390,6 +25584,7 @@ func (this *Container) String() string { `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `ResizePolicy:` + repeatedStringForResizePolicy + `,`, `}`, }, "") return s @@ -25419,6 +25614,17 @@ func (this *ContainerPort) String() string { }, 
"") return s } +func (this *ContainerResizePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResizePolicy{`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `}`, + }, "") + return s +} func (this *ContainerState) String() string { if this == nil { return "nil" @@ -25472,6 +25678,16 @@ func (this *ContainerStatus) String() string { if this == nil { return "nil" } + keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) + for k := range this.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + mapStringForAllocatedResources := "ResourceList{" + for _, k := range keysForAllocatedResources { + mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)]) + } + mapStringForAllocatedResources += "}" s := strings.Join([]string{`&ContainerStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, @@ -25482,6 +25698,8 @@ func (this *ContainerStatus) String() string { `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, `Started:` + valueToStringGenerated(this.Started) + `,`, + `AllocatedResources:` + mapStringForAllocatedResources + `,`, + `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, `}`, }, "") return s @@ -25713,6 +25931,11 @@ func (this *EphemeralContainerCommon) String() string { repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," } repeatedStringForVolumeDevices += "}" + repeatedStringForResizePolicy := "[]ContainerResizePolicy{" + for _, f := range this.ResizePolicy { + repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForResizePolicy += "}" s := strings.Join([]string{`&EphemeralContainerCommon{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -25736,6 +25959,7 @@ func (this *EphemeralContainerCommon) String() string { `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `ResizePolicy:` + repeatedStringForResizePolicy + `,`, `}`, }, "") return s @@ -27323,6 +27547,7 @@ func (this *PodStatus) String() string { `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, `PodIPs:` + repeatedStringForPodIPs + `,`, `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, + `Resize:` + fmt.Sprintf("%v", this.Resize) + `,`, `}`, }, "") return s @@ -33866,6 +34091,40 @@ func (m *Container) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34172,6 +34431,120 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } +func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ContainerState) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -35060,6 +35433,171 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Started = &b + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocatedResources == nil { + m.AllocatedResources = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AllocatedResources[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -37706,6 +38244,40 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field ResizePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -55879,6 +56451,38 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resize = PodResizeStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -65016,7 +65620,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 12: if wireType != 0 { @@ -65274,7 +65878,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ServiceInternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + s := ServiceInternalTrafficPolicy(dAtA[iNdEx:postIndex]) m.InternalTrafficPolicy = &s iNdEx = postIndex default: diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 22bc1c80..8ef67ca4 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -220,7 +220,6 @@ message CSIPersistentVolumeSource { // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an beta field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional @@ -229,9 +228,10 @@ message CSIPersistentVolumeSource { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. - // This is an alpha field and requires enabling CSINodeExpandSecret feature gate. + // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. 
+ // +featureGate=CSINodeExpandSecret // +optional optional SecretReference nodeExpandSecretRef = 10; } @@ -723,6 +723,12 @@ message Container { // +optional optional ResourceRequirements resources = 8; + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + repeated ContainerResizePolicy resizePolicy = 23; + // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional @@ -863,6 +869,17 @@ message ContainerPort { optional string hostIP = 5; } +// ContainerResizePolicy represents resource resize policy for the container. +message ContainerResizePolicy { + // Name of the resource to which this resource resize policy applies. + // Supported values: cpu, memory. + optional string resourceName = 1; + + // Restart policy to apply when specified resource is resized. + // If not specified, it defaults to NotRequired. + optional string restartPolicy = 2; +} + // ContainerState holds a possible state of container. // Only one of its members may be specified. // If none of them is specified, the default one is ContainerStateWaiting. @@ -930,41 +947,76 @@ message ContainerStateWaiting { // ContainerStatus contains details for the current status of this container. message ContainerStatus { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Name is a DNS_LABEL representing the unique name of the container. + // Each container in a pod must have a unique name across all container types. // Cannot be updated. optional string name = 1; - // Details about the container's current condition. + // State holds details about the container's current condition. // +optional optional ContainerState state = 2; - // Details about the container's last termination condition. + // LastTerminationState holds the last termination state of the container to + // help debug container crashes and restarts. This field is not + // populated if the container is still running and RestartCount is 0. // +optional optional ContainerState lastState = 3; - // Specifies whether the container has passed its readiness probe. + // Ready specifies whether the container is currently passing its readiness check. + // The value will change as readiness probes keep executing. If no readiness + // probes are specified, this field defaults to true once the container is + // fully started (see Started field). + // + // The value is typically used to determine whether a container is ready to + // accept traffic. optional bool ready = 4; - // The number of times the container has been restarted. + // RestartCount holds the number of times the container has been restarted. + // Kubelet makes an effort to always increment the value, but there + // are cases when the state may be lost due to node restarts and then the value + // may be reset to 0. The value is never negative. optional int32 restartCount = 5; - // The image the container is running. + // Image is the name of container image that the container is running. + // The container image may not match the image used in the PodSpec, + // as it may have been resolved by the runtime. // More info: https://kubernetes.io/docs/concepts/containers/images. optional string image = 6; - // ImageID of the container's image. + // ImageID is the image ID of the container's image. The image ID may not + // match the image ID of the image used in the PodSpec, as it may have been + // resolved by the runtime. 
optional string imageID = 7; - // Container's ID in the format '://'. + // ContainerID is the ID of the container in the format '://'. + // Where type is a container runtime identifier, returned from Version call of CRI API + // (for example "containerd"). // +optional optional string containerID = 8; - // Specifies whether the container has passed its startup probe. - // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. + // Started indicates whether the container has finished its postStart lifecycle hook + // and passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered + // successful. Resets to false when the container is restarted, or if kubelet + // loses state temporarily. In both cases, startup probes will run again. + // Is always true when no startupProbe is defined and container is running and + // has passed the postStart lifecycle hook. The null value must be treated the + // same as false. // +optional optional bool started = 9; + + // AllocatedResources represents the compute resources allocated for this container by the + // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission + // and after successfully admitting desired pod resize. + // +featureGate=InPlacePodVerticalScaling + // +optional + map allocatedResources = 10; + + // Resources represents the compute resource requests and limits that have been successfully + // enacted on the running container after it has been started or has been successfully resized. + // +featureGate=InPlacePodVerticalScaling + // +optional + optional ResourceRequirements resources = 11; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -1040,7 +1092,7 @@ message EmptyDirVolumeSource { // The maximum usage on memory medium EmptyDir would be the minimum value between // the SizeLimit specified here and the sum of memory limits of all containers in a pod. // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; } @@ -1049,11 +1101,8 @@ message EmptyDirVolumeSource { // +structType=atomic message EndpointAddress { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). optional string ip = 1; // The Hostname of this endpoint @@ -1089,10 +1138,17 @@ message EndpointPort { optional string protocol = 3; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. 
- // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 4; @@ -1324,6 +1380,12 @@ message EphemeralContainerCommon { // +optional optional ResourceRequirements resources = 8; + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + repeated ContainerResizePolicy resizePolicy = 23; + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional @@ -2485,6 +2547,10 @@ message NodeStatus { // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -2666,7 +2732,7 @@ message PersistentVolumeClaim { optional PersistentVolumeClaimStatus status = 3; } -// PersistentVolumeClaimCondition contails details about state of pvc +// PersistentVolumeClaimCondition contains details about state of pvc message PersistentVolumeClaimCondition { optional string type = 1; @@ -3554,7 +3620,7 @@ message PodSpec { repeated EphemeralContainer ephemeralContainers = 34; // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. + // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. // Default to Always. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional @@ -3809,14 +3875,19 @@ message PodSpec { optional bool hostUsers = 37; // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - // More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. + // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // This is a beta feature enabled by the PodSchedulingReadiness feature gate. // - // This is an alpha-level feature enabled by PodSchedulingReadiness feature gate. 
- // +optional // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name + // +featureGate=PodSchedulingReadiness + // +optional repeated PodSchedulingGate schedulingGates = 38; // ResourceClaims defines which ResourceClaims must be allocated @@ -3924,13 +3995,20 @@ message PodStatus { // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional optional string qosClass = 9; // Status for any ephemeral containers that have run in this pod. // +optional repeated ContainerStatus ephemeralContainerStatuses = 13; + + // Status of resources resize desired for pod's containers. + // It is empty if no resources resize is pending. + // Any changes to container resources will automatically set this to "Proposed" + // +featureGate=InPlacePodVerticalScaling + // +optional + optional string resize = 14; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded @@ -4123,8 +4201,6 @@ message ProbeHandler { optional TCPSocketAction tcpSocket = 3; // GRPC specifies an action involving a GRPC port. - // This is a beta field and requires enabling GRPCContainerProbe feature gate. - // +featureGate=GRPCContainerProbe // +optional optional GRPCAction grpc = 4; } @@ -4374,6 +4450,7 @@ message ReplicationControllerSpec { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional optional PodTemplateSpec template = 3; @@ -4502,7 +4579,7 @@ message ResourceRequirements { // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. + // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional map requests = 2; @@ -5629,8 +5706,12 @@ message TopologySpreadConstraint { // spreading will be calculated. The keys are used to lookup values from the // incoming pod labels, those key-value labels are ANDed with labelSelector // to select the group of existing pods over which spreading will be calculated - // for the incoming pod. Keys that don't exist in the incoming pod labels will + // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // MatchLabelKeys cannot be set when LabelSelector isn't set. + // Keys that don't exist in the incoming pod labels will // be ignored. A null or empty list means only match against labelSelector. + // + // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). 
// +listType=atomic // +optional repeated string matchLabelKeys = 8; diff --git a/vendor/k8s.io/api/core/v1/toleration.go b/vendor/k8s.io/api/core/v1/toleration.go index 9341abf8..e803d518 100644 --- a/vendor/k8s.io/api/core/v1/toleration.go +++ b/vendor/k8s.io/api/core/v1/toleration.go @@ -28,15 +28,13 @@ func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { // ToleratesTaint checks if the toleration tolerates the taint. // The matching follows the rules below: -// (1) Empty toleration.effect means to match all taint effects, // -// otherwise taint effect must equal to toleration.effect. -// -// (2) If toleration.operator is 'Exists', it means to match all taint values. -// (3) Empty toleration.key means to match all taint keys. -// -// If toleration.key is empty, toleration.operator must be 'Exists'; -// this combination means to match all taint values and all taint keys. +// 1. Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// 2. If toleration.operator is 'Exists', it means to match all taint values. +// 3. Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. func (t *Toleration) ToleratesTaint(taint *Taint) bool { if len(t.Effect) > 0 && t.Effect != taint.Effect { return false diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index bfb3b1d9..c831d596 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -577,7 +577,7 @@ const ( PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed" ) -// PersistentVolumeClaimCondition contails details about state of pvc +// PersistentVolumeClaimCondition contains details about state of pvc type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` @@ -735,7 +735,7 @@ type EmptyDirVolumeSource struct { // The maximum usage on memory medium EmptyDir would be the minimum value between // the SizeLimit specified here and the sum of memory limits of all containers in a pod. // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` } @@ -1826,7 +1826,6 @@ type CSIPersistentVolumeSource struct { // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an beta field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional @@ -1835,9 +1834,10 @@ type CSIPersistentVolumeSource struct { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. - // This is an alpha field and requires enabling CSINodeExpandSecret feature gate. 
+ // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. + // +featureGate=CSINodeExpandSecret // +optional NodeExpandSecretRef *SecretReference `json:"nodeExpandSecretRef,omitempty" protobuf:"bytes,10,opt,name=nodeExpandSecretRef"` } @@ -2265,6 +2265,33 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) +// ResourceResizeRestartPolicy specifies how to handle container resource resize. +type ResourceResizeRestartPolicy string + +// These are the valid resource resize restart policy values: +const ( + // 'NotRequired' means Kubernetes will try to resize the container + // without restarting it, if possible. Kubernetes may however choose to + // restart the container if it is unable to actuate resize without a + // restart. For e.g. the runtime doesn't support restart-free resizing. + NotRequired ResourceResizeRestartPolicy = "NotRequired" + // 'RestartContainer' means Kubernetes will resize the container in-place + // by stopping and starting the container when new resources are applied. + // This is needed for legacy applications. For e.g. java apps using the + // -xmxN flag which are unable to use resized memory without restarting. + RestartContainer ResourceResizeRestartPolicy = "RestartContainer" +) + +// ContainerResizePolicy represents resource resize policy for the container. +type ContainerResizePolicy struct { + // Name of the resource to which this resource resize policy applies. + // Supported values: cpu, memory. + ResourceName ResourceName `json:"resourceName" protobuf:"bytes,1,opt,name=resourceName,casttype=ResourceName"` + // Restart policy to apply when specified resource is resized. + // If not specified, it defaults to NotRequired. + RestartPolicy ResourceResizeRestartPolicy `json:"restartPolicy" protobuf:"bytes,2,opt,name=restartPolicy,casttype=ResourceResizeRestartPolicy"` +} + // PreemptionPolicy describes a policy for if/when to preempt a pod. // +enum type PreemptionPolicy string @@ -2311,7 +2338,7 @@ type ResourceRequirements struct { Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. + // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` @@ -2414,6 +2441,11 @@ type Container struct { // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. 
// +optional @@ -2518,8 +2550,6 @@ type ProbeHandler struct { TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` // GRPC specifies an action involving a GRPC port. - // This is a beta field and requires enabling GRPCContainerProbe feature gate. - // +featureGate=GRPCContainerProbe // +optional GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"` } @@ -2633,33 +2663,66 @@ type ContainerState struct { // ContainerStatus contains details for the current status of this container. type ContainerStatus struct { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Name is a DNS_LABEL representing the unique name of the container. + // Each container in a pod must have a unique name across all container types. // Cannot be updated. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Details about the container's current condition. + // State holds details about the container's current condition. // +optional State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` - // Details about the container's last termination condition. + // LastTerminationState holds the last termination state of the container to + // help debug container crashes and restarts. This field is not + // populated if the container is still running and RestartCount is 0. // +optional LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` - // Specifies whether the container has passed its readiness probe. + // Ready specifies whether the container is currently passing its readiness check. + // The value will change as readiness probes keep executing. If no readiness + // probes are specified, this field defaults to true once the container is + // fully started (see Started field). + // + // The value is typically used to determine whether a container is ready to + // accept traffic. Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` - // The number of times the container has been restarted. + // RestartCount holds the number of times the container has been restarted. + // Kubelet makes an effort to always increment the value, but there + // are cases when the state may be lost due to node restarts and then the value + // may be reset to 0. The value is never negative. RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` - // The image the container is running. + // Image is the name of container image that the container is running. + // The container image may not match the image used in the PodSpec, + // as it may have been resolved by the runtime. // More info: https://kubernetes.io/docs/concepts/containers/images. Image string `json:"image" protobuf:"bytes,6,opt,name=image"` - // ImageID of the container's image. + // ImageID is the image ID of the container's image. The image ID may not + // match the image ID of the image used in the PodSpec, as it may have been + // resolved by the runtime. ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` - // Container's ID in the format '://'. + // ContainerID is the ID of the container in the format '://'. + // Where type is a container runtime identifier, returned from Version call of CRI API + // (for example "containerd"). // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` - // Specifies whether the container has passed its startup probe. 
- // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. + // Started indicates whether the container has finished its postStart lifecycle hook + // and passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered + // successful. Resets to false when the container is restarted, or if kubelet + // loses state temporarily. In both cases, startup probes will run again. + // Is always true when no startupProbe is defined and container is running and + // has passed the postStart lifecycle hook. The null value must be treated the + // same as false. // +optional Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"` + // AllocatedResources represents the compute resources allocated for this container by the + // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission + // and after successfully admitting desired pod resize. + // +featureGate=InPlacePodVerticalScaling + // +optional + AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"` + // Resources represents the compute resource requests and limits that have been successfully + // enacted on the running container after it has been started or has been successfully resized. + // +featureGate=InPlacePodVerticalScaling + // +optional + Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,11,opt,name=resources"` } // PodPhase is a label for the condition of a pod at the current time. @@ -2723,6 +2786,10 @@ const ( // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination // is initiated by kubelet PodReasonTerminationByKubelet = "TerminationByKubelet" + + // PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the + // disruption was initiated by scheduler's preemption. + PodReasonPreemptionByScheduler = "PreemptionByScheduler" ) // PodCondition contains details for the current condition of this pod. @@ -2748,6 +2815,20 @@ type PodCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } +// PodResizeStatus shows status of desired resize of a pod's containers. +type PodResizeStatus string + +const ( + // Pod resources resize has been requested and will be evaluated by node. + PodResizeStatusProposed PodResizeStatus = "Proposed" + // Pod resources resize has been accepted by node and is being actuated. + PodResizeStatusInProgress PodResizeStatus = "InProgress" + // Node cannot resize the pod at this time and will keep retrying. + PodResizeStatusDeferred PodResizeStatus = "Deferred" + // Requested pod resize is not feasible and will not be re-evaluated. + PodResizeStatusInfeasible PodResizeStatus = "Infeasible" +) + // RestartPolicy describes how the container should be restarted. // Only one of the following restart policies may be specified. // If none of the following policies is specified, the default one @@ -3158,7 +3239,7 @@ type PodSpec struct { // +patchStrategy=merge EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. 
+ // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. // Default to Always. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional @@ -3384,14 +3465,19 @@ type PodSpec struct { HostUsers *bool `json:"hostUsers,omitempty" protobuf:"bytes,37,opt,name=hostUsers"` // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - // More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. + // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // This is a beta feature enabled by the PodSchedulingReadiness feature gate. // - // This is an alpha-level feature enabled by PodSchedulingReadiness feature gate. - // +optional // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name + // +featureGate=PodSchedulingReadiness + // +optional SchedulingGates []PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"` // ResourceClaims defines which ResourceClaims must be allocated // and reserved before the Pod is allowed to start. The resources @@ -3612,8 +3698,12 @@ type TopologySpreadConstraint struct { // spreading will be calculated. The keys are used to lookup values from the // incoming pod labels, those key-value labels are ANDed with labelSelector // to select the group of existing pods over which spreading will be calculated - // for the incoming pod. Keys that don't exist in the incoming pod labels will + // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // MatchLabelKeys cannot be set when LabelSelector isn't set. + // Keys that don't exist in the incoming pod labels will // be ignored. A null or empty list means only match against labelSelector. + // + // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). // +listType=atomic // +optional MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,8,opt,name=matchLabelKeys"` @@ -3881,6 +3971,11 @@ type EphemeralContainerCommon struct { // already allocated to the pod. // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. 
// +optional @@ -4066,12 +4161,19 @@ type PodStatus struct { ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` // Status for any ephemeral containers that have run in this pod. // +optional EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` + + // Status of resources resize desired for pod's containers. + // It is empty if no resources resize is pending. + // Any changes to container resources will automatically set this to "Proposed" + // +featureGate=InPlacePodVerticalScaling + // +optional + Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -4210,6 +4312,7 @@ type ReplicationControllerSpec struct { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` @@ -4370,34 +4473,47 @@ const ( ServiceTypeExternalName ServiceType = "ExternalName" ) -// ServiceInternalTrafficPolicyType describes how nodes distribute service traffic they +// ServiceInternalTrafficPolicy describes how nodes distribute service traffic they // receive on the ClusterIP. // +enum -type ServiceInternalTrafficPolicyType string +type ServiceInternalTrafficPolicy string const ( // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints. - ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster" + ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster" // ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same // node as the client pod (dropping the traffic if there are no local endpoints). - ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local" + ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local" ) -// ServiceExternalTrafficPolicyType describes how nodes distribute service traffic they +// for backwards compat +// +enum +type ServiceInternalTrafficPolicyType = ServiceInternalTrafficPolicy + +// ServiceExternalTrafficPolicy describes how nodes distribute service traffic they // receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, -// and LoadBalancer IPs). +// and LoadBalancer IPs. // +enum -type ServiceExternalTrafficPolicyType string +type ServiceExternalTrafficPolicy string const ( - // ServiceExternalTrafficPolicyTypeCluster routes traffic to all endpoints. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" + // ServiceExternalTrafficPolicyCluster routes traffic to all endpoints. 
+ ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster" - // ServiceExternalTrafficPolicyTypeLocal preserves the source IP of the traffic by + // ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by // routing only to endpoints on the same node as the traffic was received on // (dropping the traffic if there are no local endpoints). - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local" +) + +// for backwards compat +// +enum +type ServiceExternalTrafficPolicyType = ServiceExternalTrafficPolicy + +const ( + ServiceExternalTrafficPolicyTypeLocal = ServiceExternalTrafficPolicyLocal + ServiceExternalTrafficPolicyTypeCluster = ServiceExternalTrafficPolicyCluster ) // These are the valid conditions of a service. @@ -4633,7 +4749,7 @@ type ServiceSpec struct { // a NodePort from within the cluster may need to take traffic policy into account // when picking a node. // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` + ExternalTrafficPolicy ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` // healthCheckNodePort specifies the healthcheck nodePort for the service. // This only applies when type is set to LoadBalancer and @@ -4730,7 +4846,7 @@ type ServiceSpec struct { // "Cluster", uses the standard behavior of routing to all endpoints evenly // (possibly modified by topology and other features). // +optional - InternalTrafficPolicy *ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` + InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` } // ServicePort contains information on service's port. @@ -4951,11 +5067,8 @@ type EndpointSubset struct { // +structType=atomic type EndpointAddress struct { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` // The Hostname of this endpoint // +optional @@ -4988,10 +5101,17 @@ type EndpointPort struct { Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). 
- // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"` @@ -5210,6 +5330,10 @@ type NodeStatus struct { // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 76767497..a01ae371 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AWSElasticBlockStoreVolumeSource = map[string]string{ @@ -126,8 +126,8 @@ var map_CSIPersistentVolumeSource = map[string]string{ "controllerPublishSecretRef": "controllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodeStageSecretRef": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an beta field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", - "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is an alpha field and requires enabling CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is a beta field which is enabled default by CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -346,6 +346,7 @@ var map_Container = map[string]string{ "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "resizePolicy": "Resources resize policy for the container.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", @@ -388,6 +389,16 @@ func (ContainerPort) SwaggerDoc() map[string]string { return map_ContainerPort } +var map_ContainerResizePolicy = map[string]string{ + "": "ContainerResizePolicy represents resource resize policy for the container.", + "resourceName": "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.", + "restartPolicy": "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.", +} + +func (ContainerResizePolicy) SwaggerDoc() map[string]string { + return map_ContainerResizePolicy +} + var map_ContainerState = map[string]string{ "": "ContainerState holds a possible state of container. Only one of its members may be specified. 
If none of them is specified, the default one is ContainerStateWaiting.", "waiting": "Details about a waiting container", @@ -434,16 +445,18 @@ func (ContainerStateWaiting) SwaggerDoc() map[string]string { } var map_ContainerStatus = map[string]string{ - "": "ContainerStatus contains details for the current status of this container.", - "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.", - "state": "Details about the container's current condition.", - "lastState": "Details about the container's last termination condition.", - "ready": "Specifies whether the container has passed its readiness probe.", - "restartCount": "The number of times the container has been restarted.", - "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images.", - "imageID": "ImageID of the container's image.", - "containerID": "Container's ID in the format '://'.", - "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", + "": "ContainerStatus contains details for the current status of this container.", + "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", + "state": "State holds details about the container's current condition.", + "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", + "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", + "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", + "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", + "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", + "containerID": "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", + "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. 
The null value must be treated the same as false.", + "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", + "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -493,7 +506,7 @@ func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { var map_EmptyDirVolumeSource = map[string]string{ "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", "medium": "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - "sizeLimit": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", + "sizeLimit": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", } func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { @@ -502,7 +515,7 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { var map_EndpointAddress = map[string]string{ "": "EndpointAddress is a tuple that describes single IP address.", - "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).", "hostname": "The Hostname of this endpoint", "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", "targetRef": "Reference to object providing the endpoint.", @@ -517,7 +530,7 @@ var map_EndpointPort = map[string]string{ "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). 
Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -609,6 +622,7 @@ var map_EphemeralContainerCommon = map[string]string{ "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", + "resizePolicy": "Resources resize policy for the container.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Probes are not allowed for ephemeral containers.", @@ -1213,7 +1227,7 @@ var map_NodeStatus = map[string]string{ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example.", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. 
However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", @@ -1292,7 +1306,7 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimCondition = map[string]string{ - "": "PersistentVolumeClaimCondition contails details about state of pvc", + "": "PersistentVolumeClaimCondition contains details about state of pvc", "lastProbeTime": "lastProbeTime is the time we probed the condition.", "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", "reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", @@ -1668,7 +1682,7 @@ var map_PodSpec = map[string]string{ "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.", - "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. 
The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", @@ -1701,7 +1715,7 @@ var map_PodSpec = map[string]string{ "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", "hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", - "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness.\n\nThis is an alpha-level feature enabled by PodSchedulingReadiness feature gate.", + "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate.", "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", } @@ -1722,8 +1736,9 @@ var map_PodStatus = map[string]string{ "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "containerStatuses": "The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod.", + "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1841,7 +1856,7 @@ var map_ProbeHandler = map[string]string{ "exec": "Exec specifies the action to take.", "httpGet": "HTTPGet specifies the http request to perform.", "tcpSocket": "TCPSocket specifies an action involving a TCP port.", - "grpc": "GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.", + "grpc": "GRPC specifies an action involving a GRPC port.", } func (ProbeHandler) SwaggerDoc() map[string]string { @@ -1954,7 +1969,7 @@ var map_ReplicationControllerSpec = map[string]string{ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
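The schedulingGates documentation above describes the beta PodSchedulingReadiness behavior. As a hedged illustration only (not part of the vendored patch), a minimal Go sketch of a Pod that opts into that behavior might look like the following, assuming the k8s.io/api/core/v1 types this bump vendors; the gate name example.com/provision is a made-up placeholder.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// While SchedulingGates is non-empty, the scheduler leaves the Pod in
	// the SchedulingGated state; removing the gates later releases it.
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "gated-pod"},
		Spec: corev1.PodSpec{
			SchedulingGates: []corev1.PodSchedulingGate{
				{Name: "example.com/provision"}, // hypothetical gate name
			},
			Containers: []corev1.Container{
				{Name: "app", Image: "registry.example/app:latest"},
			},
		},
	}
	fmt.Println(pod.Spec.SchedulingGates[0].Name)
}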
Defaults to 0 (pod will be considered available as soon as it is ready)", "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", } func (ReplicationControllerSpec) SwaggerDoc() map[string]string { @@ -2040,7 +2055,7 @@ func (ResourceQuotaStatus) SwaggerDoc() map[string]string { var map_ResourceRequirements = map[string]string{ "": "ResourceRequirements describes the compute resource requirements.", "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", } @@ -2450,7 +2465,7 @@ var map_TopologySpreadConstraint = map[string]string{ "minDomains": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ", "nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. 
Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", "nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", - "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).", } func (TopologySpreadConstraint) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 2bf1c8ad..bfb7e0bf 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -788,6 +788,11 @@ func (in *Container) DeepCopyInto(out *Container) { } } in.Resources.DeepCopyInto(&out.Resources) + if in.ResizePolicy != nil { + in, out := &in.ResizePolicy, &out.ResizePolicy + *out = make([]ContainerResizePolicy, len(*in)) + copy(*out, *in) + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -875,6 +880,22 @@ func (in *ContainerPort) DeepCopy() *ContainerPort { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResizePolicy) DeepCopyInto(out *ContainerResizePolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResizePolicy. +func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy { + if in == nil { + return nil + } + out := new(ContainerResizePolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
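For the matchLabelKeys semantics documented above, a minimal sketch (illustrative, not part of the patch) of a TopologySpreadConstraint that combines labelSelector with matchLabelKeys could look as follows, assuming the vendored corev1 and metav1 types; the app=web selector and zone topology key are example values.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Spread pods of one app across zones; matchLabelKeys adds the incoming
	// pod's pod-template-hash value to the selector, so each rollout is
	// spread independently, as described in the documentation above.
	c := corev1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: corev1.DoNotSchedule,
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
		MatchLabelKeys: []string{"pod-template-hash"},
	}
	fmt.Println(c.TopologyKey, c.MatchLabelKeys)
}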
func (in *ContainerState) DeepCopyInto(out *ContainerState) { *out = *in @@ -967,6 +988,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = new(bool) **out = **in } + if in.AllocatedResources != nil { + in, out := &in.AllocatedResources, &out.AllocatedResources + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } @@ -1382,6 +1415,11 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) } } in.Resources.DeepCopyInto(&out.Resources) + if in.ResizePolicy != nil { + in, out := &in.ResizePolicy, &out.ResizePolicy + *out = make([]ContainerResizePolicy, len(*in)) + copy(*out, *in) + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -5517,7 +5555,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.InternalTrafficPolicy != nil { in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy - *out = new(ServiceInternalTrafficPolicyType) + *out = new(ServiceInternalTrafficPolicy) **out = **in } return diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 9cbe4639..b7150ef2 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -86,7 +86,9 @@ message EndpointConditions { // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints. + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. // +optional optional bool ready = 1; @@ -115,9 +117,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -125,21 +126,28 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. 
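The generated deepcopy code above duplicates the new ResizePolicy slice when a Container is copied. A small usage sketch, assuming the upstream corev1.ContainerResizePolicy field names (ResourceName, RestartPolicy) and constants (NotRequired, RestartContainer) that accompany this generated code, might be:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	orig := corev1.Container{
		Name: "app",
		// Field names assume the upstream corev1.ContainerResizePolicy
		// definition that pairs with the generated deepcopy above.
		ResizePolicy: []corev1.ContainerResizePolicy{
			{ResourceName: corev1.ResourceCPU, RestartPolicy: corev1.NotRequired},
		},
	}

	// DeepCopy duplicates the ResizePolicy slice, so mutating the copy
	// leaves the original untouched.
	dup := orig.DeepCopy()
	dup.ResizePolicy[0].RestartPolicy = corev1.RestartContainer

	fmt.Println(orig.ResizePolicy[0].RestartPolicy) // still NotRequired
}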
- // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 4; @@ -183,7 +191,7 @@ message EndpointSliceList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index 2df80c3d..9b4daafc 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -29,9 +29,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -40,10 +42,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -61,8 +65,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -77,8 +83,10 @@ type Endpoint struct { // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered @@ -86,6 +94,7 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional @@ -104,9 +113,11 @@ type Endpoint struct { // be used to determine endpoints local to a Node. 
// +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // zone is the name of the Zone this endpoint exists in. // +optional Zone *string `json:"zone,omitempty" protobuf:"bytes,7,opt,name=zone"` + // hints contains information associated with how an endpoint should be // consumed. // +optional @@ -119,7 +130,9 @@ type EndpointConditions struct { // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints. + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` @@ -154,28 +167,37 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` + // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"` @@ -186,9 +208,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
// +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index 746408b6..c780c957 100644 --- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ @@ -45,7 +45,7 @@ func (Endpoint) SwaggerDoc() map[string]string { var map_EndpointConditions = map[string]string{ "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.", "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.", "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.", } @@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). 
Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -90,7 +90,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index 2979e64a..8b6c360b 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -118,9 +118,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -128,17 +127,17 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; - // The application protocol for this port. + // appProtocol represents the application protocol for this port. 
// This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -186,7 +185,7 @@ message EndpointSliceList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index 7a02bead..f09f7f32 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -33,9 +33,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -44,10 +46,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -64,8 +68,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -80,8 +86,10 @@ type Endpoint struct { // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered @@ -89,10 +97,12 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the // endpoint. These key/value pairs must conform with the label format. // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels @@ -108,10 +118,12 @@ type Endpoint struct { // This field is deprecated and will be removed in future api versions. 
// +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can // be used to determine endpoints local to a Node. // +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // hints contains information associated with how an endpoint should be // consumed. // +featureGate=TopologyAwareHints @@ -159,24 +171,26 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` - // The application protocol for this port. + + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -195,9 +209,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index e1c974b3..b1d4c306 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ @@ -64,10 +64,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. 
All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -89,7 +89,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go index 797da63b..44ac0c3b 100644 --- a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
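To make the appProtocol naming convention documented above concrete, here is a hedged sketch (not part of the vendored diff) of an EndpointPort carrying the Kubernetes-defined kubernetes.io/h2c prefix, using the discovery/v1 types shown above; an implementation-defined protocol would instead use a prefixed name of the mycompany.com/my-custom-protocol form.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	discoveryv1 "k8s.io/api/discovery/v1"
)

func main() {
	name := "grpc"
	proto := corev1.ProtocolTCP
	port := int32(8080)
	// kubernetes.io/h2c is the Kubernetes-defined prefixed name from the
	// docs above (HTTP/2 over cleartext).
	appProto := "kubernetes.io/h2c"

	p := discoveryv1.EndpointPort{
		Name:        &name,
		Protocol:    &proto,
		Port:        &port,
		AppProtocol: &appProto,
	}
	fmt.Println(*p.Name, *p.AppProtocol)
}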
var map_Event = map[string]string{ diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index 0e6bd5a8..e6c28a4f 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 333142b3..863ebbc4 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -49,94 +49,10 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m 
*AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo - func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{3} + return fileDescriptor_cdc93917efc28165, []int{0} } func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +80,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{4} + return fileDescriptor_cdc93917efc28165, []int{1} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +108,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{5} + return fileDescriptor_cdc93917efc28165, []int{2} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +136,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{6} + return fileDescriptor_cdc93917efc28165, []int{3} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +164,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{7} + return fileDescriptor_cdc93917efc28165, []int{4} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +192,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{8} + return fileDescriptor_cdc93917efc28165, []int{5} } func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +220,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{9} + return fileDescriptor_cdc93917efc28165, []int{6} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +248,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{10} + return fileDescriptor_cdc93917efc28165, []int{7} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +276,7 @@ 
var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{11} + return fileDescriptor_cdc93917efc28165, []int{8} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +304,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{12} + return fileDescriptor_cdc93917efc28165, []int{9} } func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +332,7 @@ var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{13} + return fileDescriptor_cdc93917efc28165, []int{10} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +360,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{14} + return fileDescriptor_cdc93917efc28165, []int{11} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +388,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{15} + return fileDescriptor_cdc93917efc28165, []int{12} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,38 +413,10 @@ func (m *DeploymentStrategy) XXX_DiscardUnknown() { var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{16} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{17} + return fileDescriptor_cdc93917efc28165, []int{13} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -556,7 +444,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{18} + return fileDescriptor_cdc93917efc28165, []int{14} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -581,66 +469,10 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{19} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} - -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{20} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} - -var xxx_messageInfo_IDRange proto.InternalMessageInfo - func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{21} + return fileDescriptor_cdc93917efc28165, []int{15} } func (m *IPBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +500,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{22} + return fileDescriptor_cdc93917efc28165, []int{16} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +528,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{23} + return fileDescriptor_cdc93917efc28165, []int{17} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +556,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, 
[]int{24} + return fileDescriptor_cdc93917efc28165, []int{18} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +584,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } func (*IngressLoadBalancerIngress) ProtoMessage() {} func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{25} + return fileDescriptor_cdc93917efc28165, []int{19} } func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +612,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } func (*IngressLoadBalancerStatus) ProtoMessage() {} func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{26} + return fileDescriptor_cdc93917efc28165, []int{20} } func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +640,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } func (*IngressPortStatus) ProtoMessage() {} func (*IngressPortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{27} + return fileDescriptor_cdc93917efc28165, []int{21} } func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +668,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{28} + return fileDescriptor_cdc93917efc28165, []int{22} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +696,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{29} + return fileDescriptor_cdc93917efc28165, []int{23} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +724,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{30} + return fileDescriptor_cdc93917efc28165, []int{24} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +752,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{31} + return fileDescriptor_cdc93917efc28165, []int{25} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +780,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{32} + return fileDescriptor_cdc93917efc28165, []int{26} } func (m *IngressTLS) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +808,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{33} + return fileDescriptor_cdc93917efc28165, []int{27} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +836,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{34} + return fileDescriptor_cdc93917efc28165, []int{28} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +864,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{35} + return fileDescriptor_cdc93917efc28165, []int{29} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +892,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{36} + return fileDescriptor_cdc93917efc28165, []int{30} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +920,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{37} + return fileDescriptor_cdc93917efc28165, []int{31} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +948,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{38} + return fileDescriptor_cdc93917efc28165, []int{32} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +976,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{39} + return fileDescriptor_cdc93917efc28165, []int{33} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1004,7 @@ var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func (m *NetworkPolicyStatus) Reset() { *m = NetworkPolicyStatus{} } func (*NetworkPolicyStatus) ProtoMessage() {} func (*NetworkPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{40} + return fileDescriptor_cdc93917efc28165, []int{34} } func (m 
*NetworkPolicyStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1197,94 +1029,10 @@ func (m *NetworkPolicyStatus) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkPolicyStatus proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{41} -} -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) -} -func (m *PodSecurityPolicy) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{42} -} -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) -} -func (m *PodSecurityPolicyList) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{43} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo - func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{44} + return fileDescriptor_cdc93917efc28165, []int{35} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1060,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return 
fileDescriptor_cdc93917efc28165, []int{45} + return fileDescriptor_cdc93917efc28165, []int{36} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1088,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{46} + return fileDescriptor_cdc93917efc28165, []int{37} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1116,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{47} + return fileDescriptor_cdc93917efc28165, []int{38} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1144,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{48} + return fileDescriptor_cdc93917efc28165, []int{39} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1172,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{49} + return fileDescriptor_cdc93917efc28165, []int{40} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1200,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{50} + return fileDescriptor_cdc93917efc28165, []int{41} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1228,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{51} + return fileDescriptor_cdc93917efc28165, []int{42} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1505,15 +1253,15 @@ func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{52} +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{43} } -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { +func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m 
*RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1521,27 +1269,27 @@ func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([ } return b[:n], nil } -func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) } -func (m *RunAsGroupStrategyOptions) XXX_Size() int { +func (m *Scale) XXX_Size() int { return m.Size() } -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) } -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo +var xxx_messageInfo_Scale proto.InternalMessageInfo -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{53} +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{44} } -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1549,126 +1297,14 @@ func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([] } return b[:n], nil } -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) } -func (m *RunAsUserStrategyOptions) XXX_Size() int { +func (m *ScaleSpec) XXX_Size() int { return m.Size() } -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{54} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m *RuntimeClassStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo - -func (m *SELinuxStrategyOptions) Reset() { *m = 
SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{55} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} -func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{56} -} -func (m *Scale) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Scale) XXX_Merge(src proto.Message) { - xxx_messageInfo_Scale.Merge(m, src) -} -func (m *Scale) XXX_Size() int { - return m.Size() -} -func (m *Scale) XXX_DiscardUnknown() { - xxx_messageInfo_Scale.DiscardUnknown(m) -} - -var xxx_messageInfo_Scale proto.InternalMessageInfo - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{57} -} -func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleSpec.Merge(m, src) -} -func (m *ScaleSpec) XXX_Size() int { - return m.Size() -} -func (m *ScaleSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) } var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo @@ -1676,7 +1312,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{58} + return fileDescriptor_cdc93917efc28165, []int{45} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1701,38 +1337,7 @@ func (m *ScaleStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{59} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo - func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") @@ -1747,11 +1352,8 @@ func init() { proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") @@ -1772,9 +1374,6 @@ func init() { proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") proto.RegisterType((*NetworkPolicyStatus)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyStatus") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") @@ -1783,15 +1382,10 @@ func init() { proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") - 
proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } func init() { @@ -1799,344 +1393,188 @@ func init() { } var fileDescriptor_cdc93917efc28165 = []byte{ - // 3920 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5c, 0x4b, 0x6c, 0x1c, 0xd9, - 0x75, 0x55, 0x75, 0x93, 0xec, 0xe6, 0xa5, 0xf8, 0x7b, 0xa4, 0xc8, 0x1e, 0xca, 0x62, 0xcb, 0x35, - 0xc8, 0x44, 0x33, 0xd1, 0x74, 0x5b, 0x1c, 0x49, 0x1e, 0x8f, 0x10, 0x7b, 0xd8, 0xfc, 0x48, 0xb4, - 0xf9, 0xe9, 0x79, 0x4d, 0xca, 0xc6, 0x20, 0xe3, 0xb8, 0x58, 0xfd, 0xd8, 0xac, 0x61, 0x75, 0x55, - 0xa5, 0xaa, 0x9a, 0x66, 0x07, 0x59, 0x24, 0x48, 0x36, 0x06, 0x02, 0x24, 0x1b, 0x27, 0x59, 0x66, - 0x60, 0x20, 0xbb, 0x20, 0xcb, 0x64, 0xe1, 0x18, 0x09, 0xe2, 0x00, 0x42, 0xe0, 0x04, 0x06, 0xb2, - 0x88, 0x57, 0x44, 0x86, 0x5e, 0x05, 0x59, 0x65, 0x17, 0x68, 0x15, 0xbc, 0x4f, 0xfd, 0xab, 0xd8, - 0xd5, 0x8c, 0x44, 0xc4, 0x81, 0x57, 0x62, 0xbd, 0x7b, 0xef, 0x79, 0xbf, 0xfb, 0xee, 0x3d, 0xef, - 0xd3, 0x82, 0xcd, 0x93, 0xf7, 0x9d, 0x9a, 0x66, 0xd6, 0x4f, 0x7a, 0x87, 0xc4, 0x36, 0x88, 0x4b, - 0x9c, 0xfa, 0x29, 0x31, 0xda, 0xa6, 0x5d, 0x17, 0x02, 0xc5, 0xd2, 0xea, 0xe4, 0xcc, 0x25, 0x86, - 0xa3, 0x99, 0x86, 0x53, 0x3f, 0x7d, 0x70, 0x48, 0x5c, 0xe5, 0x41, 0xbd, 0x43, 0x0c, 0x62, 0x2b, - 0x2e, 0x69, 0xd7, 0x2c, 0xdb, 0x74, 0x4d, 0x74, 0x87, 0xab, 0xd7, 0x14, 0x4b, 0xab, 0x05, 0xea, - 0x35, 0xa1, 0xbe, 0xf4, 0x6e, 0x47, 0x73, 0x8f, 0x7b, 0x87, 0x35, 0xd5, 0xec, 0xd6, 0x3b, 0x66, - 0xc7, 0xac, 0x33, 0xab, 0xc3, 0xde, 0x11, 0xfb, 0x62, 0x1f, 0xec, 0x2f, 0x8e, 0xb6, 0x24, 0x87, - 0x2a, 0x57, 0x4d, 0x9b, 0xd4, 0x4f, 0x13, 0x35, 0x2e, 0x3d, 0x0c, 0x74, 0xba, 0x8a, 0x7a, 0xac, - 0x19, 0xc4, 0xee, 0xd7, 0xad, 0x93, 0x0e, 0x2d, 0x70, 0xea, 0x5d, 0xe2, 0x2a, 0x69, 0x56, 0xf5, - 0x2c, 0x2b, 0xbb, 0x67, 0xb8, 0x5a, 0x97, 0x24, 0x0c, 0x1e, 0x0f, 0x32, 0x70, 0xd4, 0x63, 0xd2, - 0x55, 0x12, 0x76, 0xef, 0x65, 0xd9, 0xf5, 0x5c, 0x4d, 0xaf, 0x6b, 0x86, 0xeb, 0xb8, 0x76, 0xdc, - 0x48, 0x7e, 0x08, 0x33, 0xab, 0xba, 0x6e, 0x7e, 0x97, 0xb4, 0xd7, 0x5a, 0x5b, 0xeb, 0xb6, 0x76, - 0x4a, 0x6c, 0x74, 0x17, 0x46, 0x0c, 0xa5, 0x4b, 0x2a, 0xd2, 0x5d, 0xe9, 0xde, 0x78, 0xe3, 0xe6, - 0x8b, 0xf3, 0xea, 0x8d, 0x8b, 0xf3, 0xea, 0xc8, 0xae, 0xd2, 0x25, 0x98, 0x49, 0xe4, 0x27, 0x30, - 0x2b, 0xac, 0x36, 0x75, 0x72, 0xf6, 0xdc, 0xd4, 0x7b, 0x5d, 0x82, 0xde, 0x82, 0xb1, 0x36, 0x03, - 0x10, 0x86, 0x53, 0xc2, 0x70, 0x8c, 0xc3, 0x62, 0x21, 0x95, 0x1d, 0x98, 0x16, 0xc6, 0xcf, 0x4c, - 0xc7, 0x6d, 0x2a, 0xee, 0x31, 0x5a, 0x01, 0xb0, 0x14, 0xf7, 0xb8, 0x69, 0x93, 0x23, 0xed, 0x4c, - 0x98, 0x23, 0x61, 0x0e, 0x4d, 0x5f, 0x82, 0x43, 0x5a, 0xe8, 0x3e, 0x94, 0x6d, 0xa2, 0xb4, 0xf7, - 0x0c, 0xbd, 0x5f, 0x29, 0xdc, 0x95, 0xee, 0x95, 0x1b, 0x33, 0xc2, 0xa2, 0x8c, 0x45, 0x39, 0xf6, - 0x35, 0xe4, 0xef, 0x17, 0x60, 0x7c, 
0x5d, 0x21, 0x5d, 0xd3, 0x68, 0x11, 0x17, 0x7d, 0x07, 0xca, - 0x74, 0xba, 0xda, 0x8a, 0xab, 0xb0, 0xda, 0x26, 0x56, 0xbe, 0x54, 0x0b, 0xdc, 0xc9, 0x1f, 0xbd, - 0x9a, 0x75, 0xd2, 0xa1, 0x05, 0x4e, 0x8d, 0x6a, 0xd7, 0x4e, 0x1f, 0xd4, 0xf6, 0x0e, 0x3f, 0x25, - 0xaa, 0xbb, 0x43, 0x5c, 0x25, 0x68, 0x5f, 0x50, 0x86, 0x7d, 0x54, 0xb4, 0x0b, 0x23, 0x8e, 0x45, - 0x54, 0xd6, 0xb2, 0x89, 0x95, 0xfb, 0xb5, 0x4b, 0x9d, 0xb5, 0xe6, 0xb7, 0xac, 0x65, 0x11, 0x35, - 0x18, 0x71, 0xfa, 0x85, 0x19, 0x0e, 0x7a, 0x0e, 0x63, 0x8e, 0xab, 0xb8, 0x3d, 0xa7, 0x52, 0x64, - 0x88, 0xb5, 0xdc, 0x88, 0xcc, 0x2a, 0x98, 0x0c, 0xfe, 0x8d, 0x05, 0x9a, 0xfc, 0x1f, 0x05, 0x40, - 0xbe, 0xee, 0x9a, 0x69, 0xb4, 0x35, 0x57, 0x33, 0x0d, 0xf4, 0x01, 0x8c, 0xb8, 0x7d, 0xcb, 0x73, - 0x81, 0xb7, 0xbc, 0x06, 0xed, 0xf7, 0x2d, 0xf2, 0xf2, 0xbc, 0xba, 0x90, 0xb4, 0xa0, 0x12, 0xcc, - 0x6c, 0xd0, 0xb6, 0xdf, 0xd4, 0x02, 0xb3, 0x7e, 0x18, 0xad, 0xfa, 0xe5, 0x79, 0x35, 0x65, 0xb1, - 0xd5, 0x7c, 0xa4, 0x68, 0x03, 0xd1, 0x29, 0x20, 0x5d, 0x71, 0xdc, 0x7d, 0x5b, 0x31, 0x1c, 0x5e, - 0x93, 0xd6, 0x25, 0x62, 0x10, 0xde, 0xc9, 0x37, 0x69, 0xd4, 0xa2, 0xb1, 0x24, 0x5a, 0x81, 0xb6, - 0x13, 0x68, 0x38, 0xa5, 0x06, 0xea, 0xcd, 0x36, 0x51, 0x1c, 0xd3, 0xa8, 0x8c, 0x44, 0xbd, 0x19, - 0xb3, 0x52, 0x2c, 0xa4, 0xe8, 0x6d, 0x28, 0x75, 0x89, 0xe3, 0x28, 0x1d, 0x52, 0x19, 0x65, 0x8a, - 0xd3, 0x42, 0xb1, 0xb4, 0xc3, 0x8b, 0xb1, 0x27, 0x97, 0x7f, 0x28, 0xc1, 0xa4, 0x3f, 0x72, 0xdb, - 0x9a, 0xe3, 0xa2, 0xdf, 0x48, 0xf8, 0x61, 0x2d, 0x5f, 0x97, 0xa8, 0x35, 0xf3, 0x42, 0xdf, 0xe7, - 0xbd, 0x92, 0x90, 0x0f, 0xee, 0xc0, 0xa8, 0xe6, 0x92, 0x2e, 0x9d, 0x87, 0xe2, 0xbd, 0x89, 0x95, - 0x7b, 0x79, 0x5d, 0xa6, 0x31, 0x29, 0x40, 0x47, 0xb7, 0xa8, 0x39, 0xe6, 0x28, 0xf2, 0x9f, 0x8c, - 0x84, 0x9a, 0x4f, 0x5d, 0x13, 0x7d, 0x02, 0x65, 0x87, 0xe8, 0x44, 0x75, 0x4d, 0x5b, 0x34, 0xff, - 0xbd, 0x9c, 0xcd, 0x57, 0x0e, 0x89, 0xde, 0x12, 0xa6, 0x8d, 0x9b, 0xb4, 0xfd, 0xde, 0x17, 0xf6, - 0x21, 0xd1, 0x47, 0x50, 0x76, 0x49, 0xd7, 0xd2, 0x15, 0x97, 0x88, 0x75, 0xf4, 0x66, 0xb8, 0x0b, - 0xd4, 0x73, 0x28, 0x58, 0xd3, 0x6c, 0xef, 0x0b, 0x35, 0xb6, 0x7c, 0xfc, 0x21, 0xf1, 0x4a, 0xb1, - 0x0f, 0x83, 0x4e, 0x61, 0xaa, 0x67, 0xb5, 0xa9, 0xa6, 0x4b, 0xa3, 0x60, 0xa7, 0x2f, 0x3c, 0xe9, - 0x71, 0xde, 0xb1, 0x39, 0x88, 0x58, 0x37, 0x16, 0x44, 0x5d, 0x53, 0xd1, 0x72, 0x1c, 0xab, 0x05, - 0xad, 0xc2, 0x74, 0x57, 0x33, 0x68, 0x5c, 0xea, 0xb7, 0x88, 0x6a, 0x1a, 0x6d, 0x87, 0xb9, 0xd5, - 0x68, 0x63, 0x51, 0x00, 0x4c, 0xef, 0x44, 0xc5, 0x38, 0xae, 0x8f, 0xbe, 0x0e, 0xc8, 0xeb, 0xc6, - 0x53, 0x1e, 0xc4, 0x35, 0xd3, 0x60, 0x3e, 0x57, 0x0c, 0x9c, 0x7b, 0x3f, 0xa1, 0x81, 0x53, 0xac, - 0xd0, 0x36, 0xcc, 0xdb, 0xe4, 0x54, 0xa3, 0x7d, 0x7c, 0xa6, 0x39, 0xae, 0x69, 0xf7, 0xb7, 0xb5, - 0xae, 0xe6, 0x56, 0xc6, 0x58, 0x9b, 0x2a, 0x17, 0xe7, 0xd5, 0x79, 0x9c, 0x22, 0xc7, 0xa9, 0x56, - 0xf2, 0x9f, 0x8e, 0xc1, 0x74, 0x2c, 0xde, 0xa0, 0xe7, 0xb0, 0xa0, 0xf6, 0x6c, 0x9b, 0x18, 0xee, - 0x6e, 0xaf, 0x7b, 0x48, 0xec, 0x96, 0x7a, 0x4c, 0xda, 0x3d, 0x9d, 0xb4, 0x99, 0xa3, 0x8c, 0x36, - 0x96, 0x45, 0x8b, 0x17, 0xd6, 0x52, 0xb5, 0x70, 0x86, 0x35, 0x1d, 0x05, 0x83, 0x15, 0xed, 0x68, - 0x8e, 0xe3, 0x63, 0x16, 0x18, 0xa6, 0x3f, 0x0a, 0xbb, 0x09, 0x0d, 0x9c, 0x62, 0x45, 0xdb, 0xd8, - 0x26, 0x8e, 0x66, 0x93, 0x76, 0xbc, 0x8d, 0xc5, 0x68, 0x1b, 0xd7, 0x53, 0xb5, 0x70, 0x86, 0x35, - 0x7a, 0x04, 0x13, 0xbc, 0x36, 0x36, 0x7f, 0x62, 0xa2, 0xe7, 0x04, 0xd8, 0xc4, 0x6e, 0x20, 0xc2, - 0x61, 0x3d, 0xda, 0x35, 0xf3, 0xd0, 0x21, 0xf6, 0x29, 0x69, 0x67, 0x4f, 0xf0, 0x5e, 0x42, 0x03, - 0xa7, 0x58, 0xd1, 0xae, 0x71, 0x0f, 0x4c, 0x74, 0x6d, 0x2c, 
0xda, 0xb5, 0x83, 0x54, 0x2d, 0x9c, - 0x61, 0x4d, 0xfd, 0x98, 0x37, 0x79, 0xf5, 0x54, 0xd1, 0x74, 0xe5, 0x50, 0x27, 0x95, 0x52, 0xd4, - 0x8f, 0x77, 0xa3, 0x62, 0x1c, 0xd7, 0x47, 0x4f, 0x61, 0x96, 0x17, 0x1d, 0x18, 0x8a, 0x0f, 0x52, - 0x66, 0x20, 0x6f, 0x08, 0x90, 0xd9, 0xdd, 0xb8, 0x02, 0x4e, 0xda, 0xa0, 0x0f, 0x60, 0x4a, 0x35, - 0x75, 0x9d, 0xf9, 0xe3, 0x9a, 0xd9, 0x33, 0xdc, 0xca, 0x38, 0x43, 0x41, 0x74, 0x3d, 0xae, 0x45, - 0x24, 0x38, 0xa6, 0x89, 0x08, 0x80, 0xea, 0x25, 0x1c, 0xa7, 0x02, 0x2c, 0x3e, 0x3e, 0xc8, 0x1b, - 0x03, 0xfc, 0x54, 0x15, 0x70, 0x00, 0xbf, 0xc8, 0xc1, 0x21, 0x60, 0xf9, 0x9f, 0x24, 0x58, 0xcc, - 0x08, 0x1d, 0xe8, 0x6b, 0x91, 0x14, 0xfb, 0x6b, 0xb1, 0x14, 0x7b, 0x3b, 0xc3, 0x2c, 0x94, 0x67, - 0x0d, 0x98, 0xb4, 0x69, 0xaf, 0x8c, 0x0e, 0x57, 0x11, 0x31, 0xf2, 0xd1, 0x80, 0x6e, 0xe0, 0xb0, - 0x4d, 0x10, 0xf3, 0x67, 0x2f, 0xce, 0xab, 0x93, 0x11, 0x19, 0x8e, 0xc2, 0xcb, 0x7f, 0x56, 0x00, - 0x58, 0x27, 0x96, 0x6e, 0xf6, 0xbb, 0xc4, 0xb8, 0x0e, 0x0e, 0xb5, 0x17, 0xe1, 0x50, 0xef, 0x0e, - 0x9a, 0x1e, 0xbf, 0x69, 0x99, 0x24, 0xea, 0x9b, 0x31, 0x12, 0x55, 0xcf, 0x0f, 0x79, 0x39, 0x8b, - 0xfa, 0xb7, 0x22, 0xcc, 0x05, 0xca, 0x01, 0x8d, 0x7a, 0x12, 0x99, 0xe3, 0x5f, 0x8d, 0xcd, 0xf1, - 0x62, 0x8a, 0xc9, 0x6b, 0xe3, 0x51, 0x9f, 0xc2, 0x14, 0x65, 0x39, 0x7c, 0x2e, 0x19, 0x87, 0x1a, - 0x1b, 0x9a, 0x43, 0xf9, 0xd9, 0x6e, 0x3b, 0x82, 0x84, 0x63, 0xc8, 0x19, 0x9c, 0xad, 0xf4, 0x8b, - 0xc8, 0xd9, 0x7e, 0x24, 0xc1, 0x54, 0x30, 0x4d, 0xd7, 0x40, 0xda, 0x76, 0xa3, 0xa4, 0xed, 0xed, - 0xdc, 0x2e, 0x9a, 0xc1, 0xda, 0xfe, 0x9b, 0x12, 0x7c, 0x5f, 0x89, 0x2e, 0xf0, 0x43, 0x45, 0x3d, - 0x19, 0xbc, 0xc7, 0x43, 0xdf, 0x97, 0x00, 0x89, 0x2c, 0xb0, 0x6a, 0x18, 0xa6, 0xab, 0xf0, 0x58, - 0xc9, 0x9b, 0xb5, 0x95, 0xbb, 0x59, 0x5e, 0x8d, 0xb5, 0x83, 0x04, 0xd6, 0x86, 0xe1, 0xda, 0xfd, - 0x60, 0x92, 0x93, 0x0a, 0x38, 0xa5, 0x01, 0x48, 0x01, 0xb0, 0x05, 0xe6, 0xbe, 0x29, 0x16, 0xf2, - 0xbb, 0x39, 0x62, 0x1e, 0x35, 0x58, 0x33, 0x8d, 0x23, 0xad, 0x13, 0x84, 0x1d, 0xec, 0x03, 0xe1, - 0x10, 0xe8, 0xd2, 0x06, 0x2c, 0x66, 0xb4, 0x16, 0xcd, 0x40, 0xf1, 0x84, 0xf4, 0xf9, 0xb0, 0x61, - 0xfa, 0x27, 0x9a, 0x87, 0xd1, 0x53, 0x45, 0xef, 0xf1, 0xf0, 0x3b, 0x8e, 0xf9, 0xc7, 0x07, 0x85, - 0xf7, 0x25, 0xf9, 0x87, 0xa3, 0x61, 0xdf, 0x61, 0x8c, 0xf9, 0x1e, 0xdd, 0xb4, 0x5a, 0xba, 0xa6, - 0x2a, 0x8e, 0x20, 0x42, 0x37, 0xf9, 0x86, 0x95, 0x97, 0x61, 0x5f, 0x1a, 0xe1, 0xd6, 0x85, 0xd7, - 0xcb, 0xad, 0x8b, 0xaf, 0x86, 0x5b, 0xff, 0x26, 0x94, 0x1d, 0x8f, 0x55, 0x8f, 0x30, 0xc8, 0x07, - 0x43, 0xc4, 0x57, 0x41, 0xa8, 0xfd, 0x0a, 0x7c, 0x2a, 0xed, 0x83, 0xa6, 0x91, 0xe8, 0xd1, 0x21, - 0x49, 0xf4, 0x2b, 0x25, 0xbe, 0x34, 0xde, 0x58, 0x4a, 0xcf, 0x21, 0x6d, 0x16, 0xdb, 0xca, 0x41, - 0xbc, 0x69, 0xb2, 0x52, 0x2c, 0xa4, 0xe8, 0x93, 0x88, 0xcb, 0x96, 0xaf, 0xe2, 0xb2, 0x53, 0xd9, - 0xee, 0x8a, 0x0e, 0x60, 0xd1, 0xb2, 0xcd, 0x8e, 0x4d, 0x1c, 0x67, 0x9d, 0x28, 0x6d, 0x5d, 0x33, - 0x88, 0x37, 0x3e, 0x9c, 0x11, 0xdd, 0xbe, 0x38, 0xaf, 0x2e, 0x36, 0xd3, 0x55, 0x70, 0x96, 0xad, - 0xfc, 0x62, 0x04, 0x66, 0xe2, 0x19, 0x30, 0x83, 0xa4, 0x4a, 0x57, 0x22, 0xa9, 0xf7, 0x43, 0x8b, - 0x81, 0x33, 0xf8, 0xd0, 0x09, 0x4e, 0x62, 0x41, 0xac, 0xc2, 0xb4, 0x88, 0x06, 0x9e, 0x50, 0xd0, - 0x74, 0x7f, 0xf6, 0x0f, 0xa2, 0x62, 0x1c, 0xd7, 0x47, 0x4f, 0x60, 0xd2, 0x66, 0xbc, 0xdb, 0x03, - 0xe0, 0xdc, 0xf5, 0x96, 0x00, 0x98, 0xc4, 0x61, 0x21, 0x8e, 0xea, 0x52, 0xde, 0x1a, 0xd0, 0x51, - 0x0f, 0x60, 0x24, 0xca, 0x5b, 0x57, 0xe3, 0x0a, 0x38, 0x69, 0x83, 0x76, 0x60, 0xae, 0x67, 0x24, - 0xa1, 0xb8, 0x2b, 0xdf, 0x16, 0x50, 0x73, 0x07, 0x49, 0x15, 0x9c, 0x66, 0x87, 0x8e, 
0x22, 0x54, - 0x76, 0x8c, 0x85, 0xe7, 0x95, 0xdc, 0x0b, 0x2f, 0x37, 0x97, 0x4d, 0xa1, 0xdb, 0xe5, 0xbc, 0x74, - 0x5b, 0xfe, 0x7b, 0x29, 0x9c, 0x84, 0x7c, 0x0a, 0x3c, 0xe8, 0x94, 0x29, 0x61, 0x11, 0x62, 0x47, - 0x66, 0x3a, 0xfb, 0x7d, 0x3c, 0x14, 0xfb, 0x0d, 0x92, 0xe7, 0x60, 0xfa, 0xfb, 0x99, 0x04, 0x0b, - 0x9b, 0xad, 0xa7, 0xb6, 0xd9, 0xb3, 0xbc, 0xe6, 0xec, 0x59, 0x7c, 0x68, 0xbe, 0x0c, 0x23, 0x76, - 0x4f, 0xf7, 0xfa, 0xf1, 0xa6, 0xd7, 0x0f, 0xdc, 0xd3, 0x69, 0x3f, 0xe6, 0x62, 0x56, 0xbc, 0x13, - 0xd4, 0x00, 0xed, 0xc2, 0x98, 0xad, 0x18, 0x1d, 0xe2, 0xa5, 0xd5, 0xb7, 0x06, 0xb4, 0x7e, 0x6b, - 0x1d, 0x53, 0xf5, 0x10, 0xb1, 0x61, 0xd6, 0x58, 0xa0, 0xc8, 0xff, 0x20, 0xc1, 0xf4, 0xb3, 0xfd, - 0xfd, 0xe6, 0x96, 0xc1, 0x56, 0x34, 0x3b, 0x5b, 0xbd, 0x0b, 0x23, 0x96, 0xe2, 0x1e, 0xc7, 0x33, - 0x3d, 0x95, 0x61, 0x26, 0x41, 0x0f, 0xa1, 0x4c, 0xff, 0xa5, 0xed, 0x62, 0x4b, 0x6a, 0x9c, 0x05, - 0xc2, 0x72, 0x53, 0x94, 0xbd, 0x0c, 0xfd, 0x8d, 0x7d, 0x4d, 0xf4, 0x2d, 0x28, 0xd1, 0xf8, 0x43, - 0x8c, 0x76, 0x4e, 0x82, 0x2e, 0x1a, 0xd5, 0xe0, 0x46, 0x01, 0xe7, 0x12, 0x05, 0xd8, 0x83, 0x93, - 0x4f, 0x60, 0x3e, 0xd4, 0x09, 0x3a, 0x8a, 0xcf, 0x69, 0x4e, 0x45, 0x2d, 0x18, 0xa5, 0xb5, 0xd3, - 0xcc, 0x59, 0xcc, 0x71, 0x04, 0x1a, 0x1b, 0x88, 0x80, 0x1f, 0xd1, 0x2f, 0x07, 0x73, 0x2c, 0x79, - 0x07, 0x26, 0xd9, 0x31, 0xb4, 0x69, 0xbb, 0x6c, 0x30, 0xd1, 0x1d, 0x28, 0x76, 0x35, 0x43, 0x64, - 0xe7, 0x09, 0x61, 0x53, 0xa4, 0x99, 0x85, 0x96, 0x33, 0xb1, 0x72, 0x26, 0xe2, 0x55, 0x20, 0x56, - 0xce, 0x30, 0x2d, 0x97, 0x9f, 0x42, 0x49, 0x4c, 0x52, 0x18, 0xa8, 0x78, 0x39, 0x50, 0x31, 0x05, - 0x68, 0x0f, 0x4a, 0x5b, 0xcd, 0x86, 0x6e, 0x72, 0xae, 0xa6, 0x6a, 0x6d, 0x3b, 0x3e, 0x83, 0x6b, - 0x5b, 0xeb, 0x18, 0x33, 0x09, 0x92, 0x61, 0x8c, 0x9c, 0xa9, 0xc4, 0x72, 0x99, 0x1f, 0x8d, 0x37, - 0x80, 0xfa, 0xc6, 0x06, 0x2b, 0xc1, 0x42, 0x22, 0xff, 0x51, 0x01, 0x4a, 0x62, 0x38, 0xae, 0x61, - 0xef, 0xb6, 0x1d, 0xd9, 0xbb, 0xbd, 0x93, 0xcf, 0x35, 0x32, 0x37, 0x6e, 0xfb, 0xb1, 0x8d, 0xdb, - 0xfd, 0x9c, 0x78, 0x97, 0xef, 0xda, 0xbe, 0x57, 0x80, 0xa9, 0xa8, 0x53, 0xa2, 0x47, 0x30, 0x41, - 0xd3, 0x94, 0xa6, 0x92, 0xdd, 0x80, 0x1d, 0xfb, 0x47, 0x37, 0xad, 0x40, 0x84, 0xc3, 0x7a, 0xa8, - 0xe3, 0x9b, 0x51, 0x3f, 0x12, 0x9d, 0xce, 0x1e, 0xd2, 0x9e, 0xab, 0xe9, 0x35, 0x7e, 0x21, 0x53, - 0xdb, 0x32, 0xdc, 0x3d, 0xbb, 0xe5, 0xda, 0x9a, 0xd1, 0x49, 0x54, 0xc4, 0x9c, 0x32, 0x8c, 0x8c, - 0xbe, 0x49, 0x53, 0xa6, 0x63, 0xf6, 0x6c, 0x95, 0xa4, 0x51, 0x5f, 0x8f, 0xb6, 0xd1, 0x05, 0xda, - 0xde, 0x36, 0x55, 0x45, 0xe7, 0x93, 0x83, 0xc9, 0x11, 0xb1, 0x89, 0xa1, 0x12, 0x8f, 0x6e, 0x72, - 0x08, 0xec, 0x83, 0xc9, 0x7f, 0x23, 0xc1, 0x84, 0x18, 0x8b, 0x6b, 0xd8, 0xe4, 0x7c, 0x23, 0xba, - 0xc9, 0x79, 0x2b, 0x67, 0xe4, 0x48, 0xdf, 0xe1, 0xfc, 0xad, 0x04, 0x4b, 0x5e, 0xd3, 0x4d, 0xa5, - 0xdd, 0x50, 0x74, 0xc5, 0x50, 0x89, 0xed, 0xf9, 0xfa, 0x12, 0x14, 0x34, 0x4b, 0xcc, 0x24, 0x08, - 0x80, 0xc2, 0x56, 0x13, 0x17, 0x34, 0x8b, 0x32, 0x90, 0x63, 0xd3, 0x71, 0xd9, 0x4e, 0x88, 0x6f, - 0xb2, 0xfd, 0x56, 0x3f, 0x13, 0xe5, 0xd8, 0xd7, 0x40, 0x07, 0x30, 0x6a, 0x99, 0xb6, 0x4b, 0xb3, - 0x7e, 0x31, 0x36, 0xbf, 0x97, 0xb4, 0x9a, 0xce, 0x9b, 0x70, 0xc4, 0x20, 0x02, 0x51, 0x18, 0xcc, - 0xd1, 0xe4, 0xdf, 0x93, 0xe0, 0x8d, 0x94, 0xf6, 0x0b, 0xc2, 0xd5, 0x86, 0x92, 0xc6, 0x85, 0x22, - 0xec, 0x7d, 0x25, 0x5f, 0xb5, 0x29, 0x43, 0x11, 0x84, 0x5c, 0x2f, 0xb4, 0x7a, 0xd0, 0xf2, 0x0f, - 0x24, 0x98, 0x4d, 0xb4, 0x97, 0xa5, 0x0e, 0xea, 0xcf, 0x62, 0xa7, 0xe2, 0xa7, 0x0e, 0xea, 0x96, - 0x4c, 0x82, 0xbe, 0x01, 0x65, 0x76, 0x8f, 0xa8, 0x9a, 0xba, 0x18, 0xc0, 0xba, 0x37, 0x80, 0x4d, - 0x51, 0xfe, 
0xf2, 0xbc, 0x7a, 0x3b, 0xe5, 0x9c, 0xc2, 0x13, 0x63, 0x1f, 0x00, 0x55, 0x61, 0x94, - 0xd8, 0xb6, 0x69, 0x8b, 0x24, 0x34, 0x4e, 0x47, 0x6a, 0x83, 0x16, 0x60, 0x5e, 0x2e, 0xff, 0x45, - 0xe0, 0xa4, 0x34, 0x2b, 0xd0, 0xf6, 0xd1, 0xc9, 0x89, 0x07, 0x46, 0x3a, 0x75, 0x98, 0x49, 0x50, - 0x0f, 0x66, 0xb4, 0x58, 0x1a, 0x11, 0xab, 0xb3, 0x9e, 0x6f, 0x18, 0x7d, 0xb3, 0x46, 0x45, 0xc0, - 0xcf, 0xc4, 0x25, 0x38, 0x51, 0x85, 0x4c, 0x20, 0xa1, 0x85, 0x3e, 0x82, 0x91, 0x63, 0xd7, 0xb5, - 0x52, 0x2e, 0x4a, 0x06, 0x24, 0xaf, 0xa0, 0x09, 0x65, 0xd6, 0xbb, 0xfd, 0xfd, 0x26, 0x66, 0x50, - 0xf2, 0xdf, 0x15, 0xfc, 0xf1, 0x60, 0xbb, 0xcb, 0x0f, 0xfd, 0xde, 0xae, 0xe9, 0x8a, 0xe3, 0xb0, - 0x10, 0xc6, 0x4f, 0x42, 0xe6, 0x43, 0x0d, 0xf7, 0x65, 0x38, 0xa1, 0x8d, 0xf6, 0x83, 0xa4, 0x2e, - 0x5d, 0x25, 0xa9, 0x4f, 0xa4, 0x25, 0x74, 0xf4, 0x0c, 0x8a, 0xae, 0x9e, 0xf7, 0x44, 0x43, 0x20, - 0xee, 0x6f, 0xb7, 0x82, 0xac, 0xb8, 0xbf, 0xdd, 0xc2, 0x14, 0x02, 0xed, 0xc1, 0x28, 0x25, 0x4e, - 0x34, 0x0f, 0x14, 0xf3, 0xe7, 0x15, 0x3a, 0x82, 0xc1, 0xe2, 0xa3, 0x5f, 0x0e, 0xe6, 0x38, 0xf2, - 0xef, 0x4b, 0x30, 0x19, 0xc9, 0x16, 0xc8, 0x86, 0x9b, 0x7a, 0x68, 0xed, 0x88, 0x71, 0x78, 0x7f, - 0xf8, 0x55, 0x27, 0x16, 0xfd, 0xbc, 0xa8, 0xf7, 0x66, 0x58, 0x86, 0x23, 0x75, 0xc8, 0x0a, 0x40, - 0xd0, 0x6d, 0xba, 0x0e, 0xa8, 0xf3, 0xf2, 0x05, 0x2f, 0xd6, 0x01, 0xf5, 0x69, 0x07, 0xf3, 0x72, - 0xb4, 0x02, 0xe0, 0x10, 0xd5, 0x26, 0xee, 0x6e, 0x10, 0xb8, 0xfc, 0x74, 0xdc, 0xf2, 0x25, 0x38, - 0xa4, 0x25, 0x7f, 0x56, 0x80, 0xc9, 0x5d, 0xe2, 0x7e, 0xd7, 0xb4, 0x4f, 0x9a, 0xa6, 0xae, 0xa9, - 0xfd, 0x6b, 0x20, 0x01, 0x38, 0x42, 0x02, 0x06, 0xc5, 0xcb, 0x48, 0xeb, 0x32, 0xa9, 0xc0, 0xc7, - 0x31, 0x2a, 0xb0, 0x32, 0x14, 0xea, 0xe5, 0x84, 0xe0, 0x47, 0x12, 0x2c, 0x46, 0xf4, 0x37, 0x82, - 0x58, 0xe3, 0x07, 0x7f, 0x29, 0x57, 0xf0, 0x8f, 0xc0, 0xd0, 0x80, 0x99, 0x1e, 0xfc, 0xd1, 0x36, - 0x14, 0x5c, 0x53, 0xac, 0x8c, 0xe1, 0x30, 0x09, 0xb1, 0x83, 0x7c, 0xb6, 0x6f, 0xe2, 0x82, 0x6b, - 0xca, 0xff, 0x28, 0x41, 0x25, 0xa2, 0x15, 0x8e, 0x96, 0xaf, 0xa9, 0x07, 0x18, 0x46, 0x8e, 0x6c, - 0xb3, 0x7b, 0xe5, 0x3e, 0xf8, 0x93, 0xbc, 0x69, 0x9b, 0x5d, 0xcc, 0xb0, 0xe4, 0x1f, 0x4b, 0x30, - 0x1b, 0xd1, 0xbc, 0x06, 0x4e, 0xf2, 0x51, 0x94, 0x93, 0xdc, 0x1f, 0xa6, 0x23, 0x19, 0xcc, 0xe4, - 0xc7, 0x85, 0x58, 0x37, 0x68, 0x87, 0xd1, 0x11, 0x4c, 0x58, 0x66, 0xbb, 0xf5, 0x0a, 0x2e, 0xce, - 0xa7, 0x29, 0x57, 0x6c, 0x06, 0x58, 0x38, 0x0c, 0x8c, 0xce, 0x60, 0x96, 0xd2, 0x16, 0xc7, 0x52, - 0x54, 0xd2, 0x7a, 0x05, 0x47, 0x89, 0xb7, 0xd8, 0xcd, 0x5c, 0x1c, 0x11, 0x27, 0x2b, 0x41, 0x3b, - 0x50, 0xd2, 0x2c, 0xb6, 0x77, 0x11, 0x8b, 0x74, 0x20, 0xc1, 0xe3, 0x3b, 0x1d, 0x9e, 0x3e, 0xc4, - 0x07, 0xf6, 0x30, 0xe4, 0x7f, 0x8d, 0x7b, 0x03, 0xa3, 0xc2, 0x4f, 0x43, 0xd4, 0x43, 0xdc, 0xa1, - 0x5d, 0x8d, 0x76, 0xec, 0x0a, 0x96, 0x73, 0x55, 0xd6, 0x5e, 0x8e, 0x71, 0xa2, 0x5f, 0x81, 0x12, - 0x31, 0xda, 0x6c, 0x23, 0xc0, 0x0f, 0xa8, 0x58, 0xaf, 0x36, 0x78, 0x11, 0xf6, 0x64, 0xf2, 0x1f, - 0x14, 0x63, 0xbd, 0x62, 0x29, 0xfc, 0xd3, 0x57, 0xe6, 0x1c, 0xfe, 0x66, 0x22, 0xd3, 0x41, 0x0e, - 0x03, 0x6a, 0xc9, 0x7d, 0xfe, 0xcb, 0xc3, 0xf8, 0x7c, 0x38, 0xb7, 0x66, 0x12, 0x4b, 0xf4, 0x6d, - 0x18, 0x23, 0xbc, 0x0a, 0x9e, 0xb1, 0x1f, 0x0f, 0x53, 0x45, 0x10, 0x7e, 0x83, 0x90, 0x2d, 0xca, - 0x04, 0x2a, 0xfa, 0x1a, 0x1d, 0x2f, 0xaa, 0x4b, 0xb7, 0x3c, 0x9c, 0x99, 0x8f, 0x37, 0xee, 0xf0, - 0x6e, 0xfb, 0xc5, 0x2f, 0xcf, 0xab, 0x10, 0x7c, 0xe2, 0xb0, 0x85, 0xfc, 0xdb, 0x30, 0x97, 0x92, - 0x22, 0x90, 0x1a, 0x39, 0x55, 0xe3, 0x11, 0xb3, 0x9e, 0x6f, 0x1a, 0xf2, 0x5f, 0x0f, 0xff, 0xb3, - 0x04, 0xb3, 0x6c, 0x76, 0xd4, 0x9e, 
0xad, 0xb9, 0xfd, 0x6b, 0xcb, 0xcb, 0xcf, 0x23, 0x79, 0xf9, - 0xe1, 0x80, 0x29, 0x49, 0xb4, 0x30, 0x2b, 0x37, 0xcb, 0x3f, 0x91, 0xe0, 0x56, 0x42, 0xfb, 0x1a, - 0x42, 0xf7, 0x41, 0x34, 0x74, 0x7f, 0x69, 0xd8, 0x0e, 0x65, 0x84, 0xef, 0xff, 0x9a, 0x4d, 0xe9, - 0x0e, 0x5b, 0xa5, 0x2b, 0x00, 0x96, 0xad, 0x9d, 0x6a, 0x3a, 0xe9, 0x88, 0x17, 0x2d, 0xe5, 0xd0, - 0x7b, 0x45, 0x5f, 0x82, 0x43, 0x5a, 0xc8, 0x81, 0x85, 0x36, 0x39, 0x52, 0x7a, 0xba, 0xbb, 0xda, - 0x6e, 0xaf, 0x29, 0x96, 0x72, 0xa8, 0xe9, 0x9a, 0xab, 0x89, 0xb3, 0xbf, 0xf1, 0xc6, 0x13, 0xfe, - 0xd2, 0x24, 0x4d, 0xe3, 0xe5, 0x79, 0xf5, 0x4e, 0xda, 0x55, 0xaf, 0xa7, 0xd2, 0xc7, 0x19, 0xd0, - 0xa8, 0x0f, 0x15, 0x9b, 0xfc, 0x56, 0x4f, 0xb3, 0x49, 0x7b, 0xdd, 0x36, 0xad, 0x48, 0xb5, 0x45, - 0x56, 0xed, 0xaf, 0x5f, 0x9c, 0x57, 0x2b, 0x38, 0x43, 0x67, 0x70, 0xc5, 0x99, 0xf0, 0xe8, 0x53, - 0x98, 0x53, 0xc4, 0xcb, 0xd2, 0x70, 0xad, 0x7c, 0x85, 0xbe, 0x7f, 0x71, 0x5e, 0x9d, 0x5b, 0x4d, - 0x8a, 0x07, 0x57, 0x98, 0x06, 0x8a, 0xea, 0x50, 0x3a, 0x65, 0x8f, 0x50, 0x9d, 0xca, 0x28, 0xc3, - 0xa7, 0xb9, 0xaa, 0xc4, 0xdf, 0xa5, 0x52, 0xcc, 0xb1, 0xcd, 0x16, 0x5b, 0xf9, 0x9e, 0x16, 0x7a, - 0x04, 0x13, 0x94, 0x4a, 0x8b, 0x95, 0xcf, 0xae, 0x7f, 0xca, 0x41, 0xc4, 0x7c, 0x16, 0x88, 0x70, - 0x58, 0x0f, 0x7d, 0x02, 0xe3, 0xc7, 0xe2, 0xb0, 0xd0, 0xa9, 0x94, 0x72, 0xf1, 0x84, 0xc8, 0xe1, - 0x62, 0x63, 0x56, 0x54, 0x31, 0xee, 0x15, 0x3b, 0x38, 0x40, 0x44, 0x6f, 0x43, 0x89, 0x7d, 0x6c, - 0xad, 0xb3, 0xb3, 0xf5, 0x72, 0x10, 0x57, 0x9f, 0xf1, 0x62, 0xec, 0xc9, 0x3d, 0xd5, 0xad, 0xe6, - 0x1a, 0xbb, 0xe3, 0x89, 0xa9, 0x6e, 0x35, 0xd7, 0xb0, 0x27, 0x47, 0xdf, 0x81, 0x92, 0x43, 0xb6, - 0x35, 0xa3, 0x77, 0x56, 0x81, 0x5c, 0x2f, 0x44, 0x5a, 0x1b, 0x4c, 0x3b, 0x76, 0xca, 0x1d, 0xd4, - 0x20, 0xe4, 0xd8, 0x83, 0x45, 0xc7, 0x30, 0x6e, 0xf7, 0x8c, 0x55, 0xe7, 0xc0, 0x21, 0x76, 0x65, - 0x82, 0xd5, 0x31, 0x28, 0x95, 0x60, 0x4f, 0x3f, 0x5e, 0x8b, 0x3f, 0x42, 0xbe, 0x06, 0x0e, 0xc0, - 0xd1, 0x31, 0x00, 0xfb, 0x60, 0x07, 0xea, 0x95, 0x85, 0x5c, 0x5b, 0x33, 0xec, 0x1b, 0xc4, 0xeb, - 0xe2, 0x97, 0x6a, 0xbe, 0x18, 0x87, 0xb0, 0xd1, 0x1f, 0x4a, 0x80, 0x9c, 0x9e, 0x65, 0xe9, 0xa4, - 0x4b, 0x0c, 0x57, 0xd1, 0x59, 0xa9, 0x53, 0xb9, 0xc9, 0xaa, 0xfc, 0x70, 0xd0, 0x08, 0x26, 0x0c, - 0xe3, 0x55, 0xfb, 0x77, 0x65, 0x49, 0x55, 0x9c, 0x52, 0x2f, 0x9d, 0xc4, 0x23, 0xd1, 0xeb, 0xc9, - 0x5c, 0x93, 0x98, 0x7e, 0x55, 0x11, 0x4c, 0xa2, 0x90, 0x63, 0x0f, 0x16, 0x3d, 0x87, 0x05, 0xef, - 0xb5, 0x34, 0x36, 0x4d, 0x77, 0x53, 0xd3, 0x89, 0xd3, 0x77, 0x5c, 0xd2, 0xad, 0x4c, 0x31, 0x07, - 0xf3, 0x9f, 0x8c, 0xe1, 0x54, 0x2d, 0x9c, 0x61, 0x8d, 0xba, 0x50, 0xf5, 0x82, 0x13, 0x5d, 0xb9, - 0x7e, 0x74, 0xdc, 0x70, 0x54, 0x45, 0xe7, 0xd7, 0x87, 0xd3, 0xac, 0x82, 0x37, 0x2f, 0xce, 0xab, - 0xd5, 0xf5, 0xcb, 0x55, 0xf1, 0x20, 0x2c, 0xf4, 0x2d, 0xa8, 0x28, 0x59, 0xf5, 0xcc, 0xb0, 0x7a, - 0xbe, 0x40, 0x23, 0x5e, 0x66, 0x05, 0x99, 0xd6, 0xc8, 0x85, 0x19, 0x25, 0xfa, 0x6e, 0xdd, 0xa9, - 0xcc, 0xe6, 0xba, 0x89, 0x88, 0x3d, 0x77, 0x0f, 0x8e, 0x92, 0x62, 0x02, 0x07, 0x27, 0x6a, 0x40, - 0xbf, 0x03, 0x48, 0x89, 0x3f, 0xb5, 0x77, 0x2a, 0x28, 0x57, 0xa2, 0x4b, 0xbc, 0xd1, 0x0f, 0xdc, - 0x2e, 0x21, 0x72, 0x70, 0x4a, 0x3d, 0x74, 0x0f, 0xa1, 0xc4, 0x7e, 0x1e, 0xe0, 0x54, 0x16, 0x13, - 0x6c, 0xe8, 0x92, 0xca, 0x7d, 0xbb, 0xd0, 0x2d, 0x69, 0x1c, 0x11, 0x27, 0x2b, 0x41, 0xdb, 0x30, - 0x2f, 0x0a, 0x0f, 0x0c, 0x47, 0x39, 0x22, 0xad, 0xbe, 0xa3, 0xba, 0xba, 0x53, 0x99, 0x63, 0xf1, - 0x9d, 0xdd, 0xd4, 0xaf, 0xa6, 0xc8, 0x71, 0xaa, 0x15, 0xfa, 0x10, 0x66, 0x8e, 0x4c, 0xfb, 0x50, - 0x6b, 0xb7, 0x89, 0xe1, 0x21, 0xcd, 0x33, 0x24, 0x76, 0x32, 
0xb6, 0x19, 0x93, 0xe1, 0x84, 0x36, - 0x72, 0xe0, 0x96, 0x40, 0x6e, 0xda, 0xa6, 0xba, 0x63, 0xf6, 0x0c, 0x97, 0x53, 0xce, 0x5b, 0x7e, - 0x1a, 0xbd, 0xb5, 0x9a, 0xa6, 0xf0, 0xf2, 0xbc, 0x7a, 0x37, 0x7d, 0x23, 0x12, 0x28, 0xe1, 0x74, - 0x6c, 0x64, 0xc1, 0x4d, 0xf1, 0xa3, 0x0f, 0x76, 0x44, 0x57, 0xa9, 0xb0, 0xa5, 0xff, 0xc1, 0xe0, - 0x80, 0xe7, 0x9b, 0xc4, 0xd7, 0xff, 0xcc, 0xc5, 0x79, 0xf5, 0x66, 0x58, 0x01, 0x47, 0x6a, 0x60, - 0x8f, 0xfc, 0xc4, 0xd5, 0xf2, 0xf5, 0xfc, 0x50, 0x62, 0xb8, 0x47, 0x7e, 0x41, 0xd3, 0x5e, 0xd9, - 0x23, 0xbf, 0x10, 0xe4, 0xe5, 0xa7, 0x43, 0xff, 0x59, 0x80, 0xb9, 0x40, 0x39, 0xf7, 0x23, 0xbf, - 0x14, 0x93, 0x5f, 0xfe, 0x58, 0x22, 0xdf, 0xc3, 0xbb, 0x60, 0xe8, 0xfe, 0xef, 0x3d, 0xbc, 0x0b, - 0xda, 0x96, 0xb1, 0x7b, 0xf8, 0xab, 0x42, 0xb8, 0x03, 0x43, 0xbe, 0xfe, 0x7a, 0x05, 0xbf, 0x17, - 0xf8, 0x85, 0x7b, 0x40, 0x26, 0xff, 0xa4, 0x08, 0x33, 0xf1, 0xd5, 0x18, 0x79, 0x24, 0x24, 0x0d, - 0x7c, 0x24, 0xd4, 0x84, 0xf9, 0xa3, 0x9e, 0xae, 0xf7, 0x59, 0x1f, 0x42, 0x2f, 0x85, 0xf8, 0x75, - 0xfd, 0x17, 0x84, 0xe5, 0xfc, 0x66, 0x8a, 0x0e, 0x4e, 0xb5, 0x4c, 0xbe, 0x19, 0x1a, 0xf9, 0xdf, - 0xbe, 0x19, 0x1a, 0xbd, 0xc2, 0x9b, 0xa1, 0xf4, 0x67, 0x57, 0xc5, 0x2b, 0x3d, 0xbb, 0xba, 0xca, - 0x83, 0xa1, 0x94, 0x20, 0x36, 0xf0, 0x74, 0xe3, 0xab, 0x30, 0x15, 0x7d, 0xc4, 0xc6, 0xe7, 0x92, - 0xbf, 0xa3, 0x13, 0xcf, 0x22, 0x42, 0x73, 0xc9, 0xcb, 0xb1, 0xaf, 0x21, 0x5f, 0x48, 0xb0, 0x90, - 0xfe, 0x58, 0x1d, 0xe9, 0x30, 0xd5, 0x55, 0xce, 0xc2, 0x3f, 0x20, 0x90, 0xae, 0x78, 0x78, 0xc7, - 0x5e, 0x2f, 0xed, 0x44, 0xb0, 0x70, 0x0c, 0x1b, 0x7d, 0x0c, 0xe5, 0xae, 0x72, 0xd6, 0xea, 0xd9, - 0x1d, 0x72, 0xe5, 0x43, 0x42, 0xb6, 0x8c, 0x76, 0x04, 0x0a, 0xf6, 0xf1, 0xe4, 0x9f, 0x4b, 0xb0, - 0x98, 0xf1, 0x26, 0xe9, 0xff, 0x51, 0x2f, 0x7f, 0x20, 0xc1, 0x1b, 0x99, 0xdb, 0x30, 0xf4, 0x38, - 0xf2, 0x7c, 0x4a, 0x8e, 0x3d, 0x9f, 0x42, 0x49, 0xc3, 0xd7, 0xf4, 0x7a, 0xea, 0x33, 0x09, 0x2a, - 0x59, 0xfb, 0x52, 0xf4, 0x28, 0xd2, 0xc8, 0x2f, 0xc6, 0x1a, 0x39, 0x9b, 0xb0, 0x7b, 0x4d, 0x6d, - 0xfc, 0x17, 0x09, 0x6e, 0x5f, 0xc2, 0xef, 0xfc, 0xed, 0x0f, 0x69, 0x87, 0xb5, 0xd8, 0xa9, 0xbd, - 0xb8, 0x4e, 0x0c, 0xb6, 0x3f, 0x29, 0x3a, 0x38, 0xd3, 0x1a, 0x1d, 0xc0, 0xa2, 0xd8, 0x7b, 0xc5, - 0x65, 0x82, 0xba, 0xb0, 0x57, 0xa6, 0xeb, 0xe9, 0x2a, 0x38, 0xcb, 0x56, 0xfe, 0x4b, 0x09, 0x16, - 0xd2, 0x0f, 0x1c, 0xd0, 0x7b, 0x91, 0x21, 0xaf, 0xc6, 0x86, 0x7c, 0x3a, 0x66, 0x25, 0x06, 0xfc, - 0xdb, 0x30, 0x25, 0x8e, 0x25, 0x04, 0x8c, 0x70, 0x66, 0x39, 0x2d, 0x3b, 0x09, 0x08, 0x8f, 0x1c, - 0xb3, 0x65, 0x12, 0x2d, 0xc3, 0x31, 0x34, 0xf9, 0x7b, 0x05, 0x18, 0x6d, 0xa9, 0x8a, 0x4e, 0xae, - 0x81, 0x1b, 0x7f, 0x3d, 0xc2, 0x8d, 0x07, 0xfd, 0x7e, 0x93, 0xb5, 0x2a, 0x93, 0x16, 0xe3, 0x18, - 0x2d, 0x7e, 0x27, 0x17, 0xda, 0xe5, 0x8c, 0xf8, 0x2b, 0x30, 0xee, 0x57, 0x3a, 0x5c, 0xa2, 0x96, - 0xff, 0xbc, 0x00, 0x13, 0xa1, 0x2a, 0x86, 0x4c, 0xf3, 0x47, 0x11, 0x6e, 0x53, 0xcc, 0x71, 0x08, - 0x14, 0xaa, 0xab, 0xe6, 0xb1, 0x19, 0xfe, 0xfb, 0x83, 0xe0, 0xc5, 0x79, 0x92, 0xe4, 0x7c, 0x15, - 0xa6, 0x5c, 0xc5, 0xee, 0x10, 0xd7, 0xbf, 0x90, 0xe1, 0x4f, 0x53, 0xfc, 0x1f, 0xc2, 0xec, 0x47, - 0xa4, 0x38, 0xa6, 0xbd, 0xf4, 0x04, 0x26, 0x23, 0x95, 0x0d, 0xf5, 0xf3, 0x81, 0xbf, 0x96, 0xe0, - 0x8b, 0x03, 0x0f, 0x92, 0x50, 0x23, 0xb2, 0x48, 0x6a, 0xb1, 0x45, 0xb2, 0x9c, 0x0d, 0xf0, 0xfa, - 0x9e, 0xa1, 0x36, 0xd6, 0x5e, 0x7c, 0xbe, 0x7c, 0xe3, 0xa7, 0x9f, 0x2f, 0xdf, 0xf8, 0xd9, 0xe7, - 0xcb, 0x37, 0x7e, 0xf7, 0x62, 0x59, 0x7a, 0x71, 0xb1, 0x2c, 0xfd, 0xf4, 0x62, 0x59, 0xfa, 0xd9, - 0xc5, 0xb2, 0xf4, 0xef, 0x17, 0xcb, 0xd2, 0x1f, 0xff, 0x7c, 0xf9, 0xc6, 0xc7, 0x77, 
0x2e, 0xfd, - 0xff, 0x1e, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x08, 0x20, 0x7f, 0x0a, 0x28, 0x42, 0x00, 0x00, -} - -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + // 2890 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x24, 0x47, + 0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0xef, 0x37, 0x76, + 0xd4, 0x5f, 0x11, 0x36, 0x61, 0x33, 0xc3, 0x6e, 0x92, 0x25, 0x3f, 0xa4, 0x84, 0x1d, 0xef, 0x26, + 0xeb, 0xc4, 0x1e, 0x4f, 0x6a, 0xc6, 0x09, 0x8a, 0x08, 0xd0, 0xee, 0x29, 0x8f, 0x3b, 0xee, 0xe9, + 0x1e, 0x75, 0xd7, 0x98, 0x35, 0x27, 0x10, 0x5c, 0x72, 0x82, 0x4b, 0x20, 0x47, 0x10, 0x12, 0x57, + 0xae, 0x1c, 0x42, 0x04, 0x22, 0x48, 0x2b, 0xc4, 0x21, 0x12, 0x07, 0x72, 0xb2, 0x88, 0x73, 0x42, + 0xfc, 0x03, 0x68, 0x4f, 0xa8, 0x7e, 0x74, 0xf5, 0x6f, 0xbb, 0xc7, 0x38, 0x16, 0x41, 0x9c, 0x3c, + 0x5d, 0xef, 0xbd, 0x4f, 0xbd, 0xaa, 0x7a, 0xf5, 0xde, 0xa7, 0xba, 0xda, 0xf0, 0xf2, 0xee, 0xb3, + 0x7e, 0xcd, 0x72, 0xeb, 0xbb, 0xc3, 0x2d, 0xe2, 0x39, 0x84, 0x12, 0xbf, 0xbe, 0x47, 0x9c, 0xae, + 0xeb, 0xd5, 0xa5, 0xc0, 0x18, 0x58, 0x75, 0x72, 0x8f, 0x12, 0xc7, 0xb7, 0x5c, 0xc7, 0xaf, 0xef, + 0x5d, 0xdf, 0x22, 0xd4, 0xb8, 0x5e, 0xef, 0x11, 0x87, 0x78, 0x06, 0x25, 0xdd, 0xda, 0xc0, 0x73, + 0xa9, 0x8b, 0x1e, 0x11, 0xea, 0x35, 0x63, 0x60, 0xd5, 0x42, 0xf5, 0x9a, 0x54, 0x5f, 0x7c, 0xb2, + 0x67, 0xd1, 0x9d, 0xe1, 0x56, 0xcd, 0x74, 0xfb, 0xf5, 0x9e, 0xdb, 0x73, 0xeb, 0xdc, 0x6a, 0x6b, + 0xb8, 0xcd, 0x9f, 0xf8, 0x03, 0xff, 0x25, 0xd0, 0x16, 0xf5, 0x48, 0xe7, 0xa6, 
0xeb, 0x91, 0xfa, + 0x5e, 0xaa, 0xc7, 0xc5, 0xa7, 0x43, 0x9d, 0xbe, 0x61, 0xee, 0x58, 0x0e, 0xf1, 0xf6, 0xeb, 0x83, + 0xdd, 0x1e, 0x6b, 0xf0, 0xeb, 0x7d, 0x42, 0x8d, 0x2c, 0xab, 0x7a, 0x9e, 0x95, 0x37, 0x74, 0xa8, + 0xd5, 0x27, 0x29, 0x83, 0x9b, 0xc7, 0x19, 0xf8, 0xe6, 0x0e, 0xe9, 0x1b, 0x29, 0xbb, 0xa7, 0xf2, + 0xec, 0x86, 0xd4, 0xb2, 0xeb, 0x96, 0x43, 0x7d, 0xea, 0x25, 0x8d, 0xf4, 0xf7, 0x4a, 0x30, 0x79, + 0xdb, 0x20, 0x7d, 0xd7, 0x69, 0x13, 0x8a, 0xbe, 0x03, 0x55, 0x36, 0x8c, 0xae, 0x41, 0x8d, 0x05, + 0xed, 0x51, 0xed, 0xea, 0xd4, 0x8d, 0xaf, 0xd6, 0xc2, 0x69, 0x56, 0xa8, 0xb5, 0xc1, 0x6e, 0x8f, + 0x35, 0xf8, 0x35, 0xa6, 0x5d, 0xdb, 0xbb, 0x5e, 0xdb, 0xd8, 0x7a, 0x87, 0x98, 0x74, 0x9d, 0x50, + 0xa3, 0x81, 0xee, 0x1f, 0x2c, 0x9f, 0x3b, 0x3c, 0x58, 0x86, 0xb0, 0x0d, 0x2b, 0x54, 0xd4, 0x84, + 0x31, 0x7f, 0x40, 0xcc, 0x85, 0x12, 0x47, 0xbf, 0x56, 0x3b, 0x72, 0x11, 0x6b, 0xca, 0xb3, 0xf6, + 0x80, 0x98, 0x8d, 0xf3, 0x12, 0x79, 0x8c, 0x3d, 0x61, 0x8e, 0x83, 0xde, 0x80, 0x71, 0x9f, 0x1a, + 0x74, 0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 0xcc, 0x71, 0xf1, + 0x8c, 0x25, 0x9a, 0xfe, 0xf7, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x5d, 0x8b, 0x5a, 0xae, 0x83, + 0x9e, 0x87, 0x31, 0xba, 0x3f, 0x20, 0x7c, 0x72, 0x26, 0x1b, 0x8f, 0x05, 0x0e, 0x75, 0xf6, 0x07, + 0xe4, 0xc1, 0xc1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, 0x6e, + 0xfd, 0x74, 0xbc, 0xeb, 0x07, 0x07, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, 0x1e, + 0x20, 0xdb, 0xf0, 0x69, 0xc7, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x89, 0x9c, 0x84, 0x27, 0x8a, + 0x2d, 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x1e, 0x83, + 0x71, 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, 0x29, + 0x7a, 0x1c, 0x26, 0xfa, 0xc4, 0xf7, 0x8d, 0x1e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, 0xb1, + 0x2e, 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd0, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, 0xcd, + 0x54, 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, 0x91, + 0x18, 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x9f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0x8d, 0xab, 0x45, 0x43, + 0xa6, 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0xa7, 0x63, 0x11, 0xf7, 0x59, + 0x68, 0xa2, 0xb7, 0xa1, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0x4f, 0x15, 0x74, 0xdf, + 0xd8, 0x22, 0x76, 0x5b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x27, 0xac, 0x20, 0xd1, 0xeb, 0x50, + 0xa5, 0xa4, 0x3f, 0xb0, 0x0d, 0x4a, 0xe4, 0x3e, 0xfa, 0xff, 0xe8, 0x10, 0x58, 0xe4, 0x30, 0xb0, + 0x96, 0xdb, 0xed, 0x48, 0x35, 0xbe, 0x7d, 0xd4, 0x94, 0x04, 0xad, 0x58, 0xc1, 0xa0, 0x3d, 0x98, + 0x19, 0x0e, 0xba, 0x4c, 0x93, 0xb2, 0xec, 0xd0, 0xdb, 0x97, 0x91, 0x74, 0xb3, 0xe8, 0xdc, 0x6c, + 0xc6, 0xac, 0x1b, 0x97, 0x65, 0x5f, 0x33, 0xf1, 0x76, 0x9c, 0xe8, 0x05, 0xdd, 0x82, 0xd9, 0xbe, + 0xe5, 0x60, 0x62, 0x74, 0xf7, 0xdb, 0xc4, 0x74, 0x9d, 0xae, 0xcf, 0xc3, 0xaa, 0xd2, 0x98, 0x97, + 0x00, 0xb3, 0xeb, 0x71, 0x31, 0x4e, 0xea, 0xa3, 0x57, 0x01, 0x05, 0xc3, 0x78, 0x45, 0x24, 0x37, + 0xcb, 0x75, 0x78, 0xcc, 0x95, 0xc3, 0xe0, 0xee, 0xa4, 0x34, 0x70, 0x86, 0x15, 0x5a, 0x83, 0x39, + 0x8f, 0xec, 0x59, 0x6c, 0x8c, 0x77, 0x2d, 0x9f, 0xba, 0xde, 0xfe, 0x9a, 0xd5, 0xb7, 0xe8, 0xc2, + 0x38, 0xf7, 0x69, 0xe1, 0xf0, 0x60, 0x79, 0x0e, 0x67, 0xc8, 0x71, 0xa6, 0x95, 0xfe, 0xb3, 0x71, + 0x98, 0x4d, 0xe4, 0x1b, 0xf4, 0x06, 0x5c, 0x36, 0x87, 0x9e, 0x47, 0x1c, 0xda, 0x1c, 0xf6, 0xb7, + 0x88, 0xd7, 0x36, 0x77, 0x48, 0x77, 0x68, 0x93, 0x2e, 0x0f, 0x94, 0x4a, 0x63, 0x49, 0x7a, 0x7c, + 0x79, 
0x25, 0x53, 0x0b, 0xe7, 0x58, 0xb3, 0x59, 0x70, 0x78, 0xd3, 0xba, 0xe5, 0xfb, 0x0a, 0xb3, + 0xc4, 0x31, 0xd5, 0x2c, 0x34, 0x53, 0x1a, 0x38, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xbe, 0xe5, 0x91, + 0x6e, 0xd2, 0xc7, 0x72, 0xdc, 0xc7, 0xdb, 0x99, 0x5a, 0x38, 0xc7, 0x1a, 0x3d, 0x03, 0x53, 0xa2, + 0x37, 0xbe, 0x7e, 0x72, 0xa1, 0x2f, 0x49, 0xb0, 0xa9, 0x66, 0x28, 0xc2, 0x51, 0x3d, 0x36, 0x34, + 0x77, 0xcb, 0x27, 0xde, 0x1e, 0xe9, 0xe6, 0x2f, 0xf0, 0x46, 0x4a, 0x03, 0x67, 0x58, 0xb1, 0xa1, + 0x89, 0x08, 0x4c, 0x0d, 0x6d, 0x3c, 0x3e, 0xb4, 0xcd, 0x4c, 0x2d, 0x9c, 0x63, 0xcd, 0xe2, 0x58, + 0xb8, 0x7c, 0x6b, 0xcf, 0xb0, 0x6c, 0x63, 0xcb, 0x26, 0x0b, 0x13, 0xf1, 0x38, 0x6e, 0xc6, 0xc5, + 0x38, 0xa9, 0x8f, 0x5e, 0x81, 0x8b, 0xa2, 0x69, 0xd3, 0x31, 0x14, 0x48, 0x95, 0x83, 0x3c, 0x2c, + 0x41, 0x2e, 0x36, 0x93, 0x0a, 0x38, 0x6d, 0x83, 0x9e, 0x87, 0x19, 0xd3, 0xb5, 0x6d, 0x1e, 0x8f, + 0x2b, 0xee, 0xd0, 0xa1, 0x0b, 0x93, 0x1c, 0x05, 0xb1, 0xfd, 0xb8, 0x12, 0x93, 0xe0, 0x84, 0x26, + 0x22, 0x00, 0x66, 0x50, 0x70, 0xfc, 0x05, 0xe0, 0xf9, 0xf1, 0x7a, 0xd1, 0x1c, 0xa0, 0x4a, 0x55, + 0xc8, 0x01, 0x54, 0x93, 0x8f, 0x23, 0xc0, 0xfa, 0x9f, 0x34, 0x98, 0xcf, 0x49, 0x1d, 0xe8, 0xa5, + 0x58, 0x89, 0xfd, 0x4a, 0xa2, 0xc4, 0x5e, 0xc9, 0x31, 0x8b, 0xd4, 0x59, 0x07, 0xa6, 0x3d, 0x36, + 0x2a, 0xa7, 0x27, 0x54, 0x64, 0x8e, 0x7c, 0xe6, 0x98, 0x61, 0xe0, 0xa8, 0x4d, 0x98, 0xf3, 0x2f, + 0x1e, 0x1e, 0x2c, 0x4f, 0xc7, 0x64, 0x38, 0x0e, 0xaf, 0xbf, 0x5f, 0x02, 0xb8, 0x4d, 0x06, 0xb6, + 0xbb, 0xdf, 0x27, 0xce, 0x59, 0x70, 0xa8, 0x8d, 0x18, 0x87, 0x7a, 0xf2, 0xb8, 0xe5, 0x51, 0xae, + 0xe5, 0x92, 0xa8, 0x37, 0x13, 0x24, 0xaa, 0x5e, 0x1c, 0xf2, 0x68, 0x16, 0xf5, 0xd7, 0x32, 0x5c, + 0x0a, 0x95, 0x43, 0x1a, 0xf5, 0x42, 0x6c, 0x8d, 0xbf, 0x9c, 0x58, 0xe3, 0xf9, 0x0c, 0x93, 0xcf, + 0x8d, 0x47, 0xbd, 0x03, 0x33, 0x8c, 0xe5, 0x88, 0xb5, 0xe4, 0x1c, 0x6a, 0x7c, 0x64, 0x0e, 0xa5, + 0xaa, 0xdd, 0x5a, 0x0c, 0x09, 0x27, 0x90, 0x73, 0x38, 0xdb, 0xc4, 0x17, 0x91, 0xb3, 0x7d, 0xa8, + 0xc1, 0x4c, 0xb8, 0x4c, 0x67, 0x40, 0xda, 0x9a, 0x71, 0xd2, 0xf6, 0x78, 0xe1, 0x10, 0xcd, 0x61, + 0x6d, 0xff, 0x64, 0x04, 0x5f, 0x29, 0xb1, 0x0d, 0xbe, 0x65, 0x98, 0xbb, 0xe8, 0x51, 0x18, 0x73, + 0x8c, 0x7e, 0x10, 0x99, 0x6a, 0xb3, 0x34, 0x8d, 0x3e, 0xc1, 0x5c, 0x82, 0xde, 0xd3, 0x00, 0xc9, + 0x2a, 0x70, 0xcb, 0x71, 0x5c, 0x6a, 0x88, 0x5c, 0x29, 0xdc, 0x5a, 0x2d, 0xec, 0x56, 0xd0, 0x63, + 0x6d, 0x33, 0x85, 0x75, 0xc7, 0xa1, 0xde, 0x7e, 0xb8, 0xc8, 0x69, 0x05, 0x9c, 0xe1, 0x00, 0x32, + 0x00, 0x3c, 0x89, 0xd9, 0x71, 0xe5, 0x46, 0x7e, 0xb2, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, + 0x6d, 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x81, 0xf9, 0x1c, 0x6f, 0xd1, + 0x05, 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x44, 0x73, 0x50, 0xd9, 0x33, 0xec, 0xa1, + 0x48, 0xbf, 0x93, 0x58, 0x3c, 0x3c, 0x5f, 0x7a, 0x56, 0xd3, 0x3f, 0xa8, 0x44, 0x63, 0x87, 0x33, + 0xe6, 0xab, 0x50, 0xf5, 0xc8, 0xc0, 0xb6, 0x4c, 0xc3, 0x97, 0x44, 0x88, 0x93, 0x5f, 0x2c, 0xdb, + 0xb0, 0x92, 0xc6, 0xb8, 0x75, 0xe9, 0xf3, 0xe5, 0xd6, 0xe5, 0xd3, 0xe1, 0xd6, 0xdf, 0x86, 0xaa, + 0x1f, 0xb0, 0xea, 0x31, 0x0e, 0x79, 0x7d, 0x84, 0xfc, 0x2a, 0x09, 0xb5, 0xea, 0x40, 0x51, 0x69, + 0x05, 0x9a, 0x45, 0xa2, 0x2b, 0x23, 0x92, 0xe8, 0x53, 0x25, 0xbe, 0x2c, 0xdf, 0x0c, 0x8c, 0xa1, + 0x4f, 0xba, 0x3c, 0xb7, 0x55, 0xc3, 0x7c, 0xd3, 0xe2, 0xad, 0x58, 0x4a, 0xd1, 0xdb, 0xb1, 0x90, + 0xad, 0x9e, 0x24, 0x64, 0x67, 0xf2, 0xc3, 0x15, 0x6d, 0xc2, 0xfc, 0xc0, 0x73, 0x7b, 0x1e, 0xf1, + 0xfd, 0xdb, 0xc4, 0xe8, 0xda, 0x96, 0x43, 0x82, 0xf9, 0x11, 0x8c, 0xe8, 0xca, 0xe1, 0xc1, 0xf2, + 0x7c, 0x2b, 0x5b, 0x05, 0xe7, 
0xd9, 0xea, 0xf7, 0xc7, 0xe0, 0x42, 0xb2, 0x02, 0xe6, 0x90, 0x54, + 0xed, 0x44, 0x24, 0xf5, 0x5a, 0x64, 0x33, 0x08, 0x06, 0xaf, 0x56, 0x3f, 0x63, 0x43, 0xdc, 0x82, + 0x59, 0x99, 0x0d, 0x02, 0xa1, 0xa4, 0xe9, 0x6a, 0xf5, 0x37, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0x2f, + 0xc0, 0xb4, 0xc7, 0x79, 0x77, 0x00, 0x20, 0xb8, 0xeb, 0x43, 0x12, 0x60, 0x1a, 0x47, 0x85, 0x38, + 0xae, 0xcb, 0x78, 0x6b, 0x48, 0x47, 0x03, 0x80, 0xb1, 0x38, 0x6f, 0xbd, 0x95, 0x54, 0xc0, 0x69, + 0x1b, 0xb4, 0x0e, 0x97, 0x86, 0x4e, 0x1a, 0x4a, 0x84, 0xf2, 0x15, 0x09, 0x75, 0x69, 0x33, 0xad, + 0x82, 0xb3, 0xec, 0xd0, 0x76, 0x8c, 0xca, 0x8e, 0xf3, 0xf4, 0x7c, 0xa3, 0xf0, 0xc6, 0x2b, 0xcc, + 0x65, 0x33, 0xe8, 0x76, 0xb5, 0x28, 0xdd, 0xd6, 0x7f, 0xaf, 0x45, 0x8b, 0x90, 0xa2, 0xc0, 0xc7, + 0xbd, 0x65, 0x4a, 0x59, 0x44, 0xd8, 0x91, 0x9b, 0xcd, 0x7e, 0x6f, 0x8e, 0xc4, 0x7e, 0xc3, 0xe2, + 0x79, 0x3c, 0xfd, 0xfd, 0x83, 0x06, 0xb3, 0x77, 0x3b, 0x9d, 0xd6, 0xaa, 0xc3, 0x77, 0x4b, 0xcb, + 0xa0, 0x3b, 0xac, 0x8a, 0x0e, 0x0c, 0xba, 0x93, 0xac, 0xa2, 0x4c, 0x86, 0xb9, 0x04, 0x3d, 0x0d, + 0x55, 0xf6, 0x97, 0x39, 0xce, 0xc3, 0x75, 0x92, 0x27, 0x99, 0x6a, 0x4b, 0xb6, 0x3d, 0x88, 0xfc, + 0xc6, 0x4a, 0x13, 0x7d, 0x03, 0x26, 0xd8, 0xde, 0x26, 0x4e, 0xb7, 0x20, 0xf9, 0x95, 0x4e, 0x35, + 0x84, 0x51, 0xc8, 0x67, 0x64, 0x03, 0x0e, 0xe0, 0xf4, 0x5d, 0x98, 0x8b, 0x0c, 0x02, 0x0f, 0x6d, + 0xf2, 0x06, 0xab, 0x57, 0xa8, 0x0d, 0x15, 0xd6, 0x3b, 0xab, 0x4a, 0xe5, 0x02, 0xaf, 0x17, 0x13, + 0x13, 0x11, 0x72, 0x0f, 0xf6, 0xe4, 0x63, 0x81, 0xa5, 0x6f, 0xc0, 0xc4, 0x6a, 0xab, 0x61, 0xbb, + 0x82, 0x6f, 0x98, 0x56, 0xd7, 0x4b, 0xce, 0xd4, 0xca, 0xea, 0x6d, 0x8c, 0xb9, 0x04, 0xe9, 0x30, + 0x4e, 0xee, 0x99, 0x64, 0x40, 0x39, 0xc5, 0x98, 0x6c, 0x00, 0x4b, 0xa4, 0x77, 0x78, 0x0b, 0x96, + 0x12, 0xfd, 0xc7, 0x25, 0x98, 0x90, 0xdd, 0x9e, 0xc1, 0xf9, 0x63, 0x2d, 0x76, 0xfe, 0x78, 0xa2, + 0xd8, 0x12, 0xe4, 0x1e, 0x3e, 0x3a, 0x89, 0xc3, 0xc7, 0xb5, 0x82, 0x78, 0x47, 0x9f, 0x3c, 0xde, + 0x2d, 0xc1, 0x4c, 0x7c, 0xf1, 0xd1, 0x33, 0x30, 0xc5, 0x52, 0xad, 0x65, 0x92, 0x66, 0xc8, 0xf0, + 0xd4, 0xeb, 0x87, 0x76, 0x28, 0xc2, 0x51, 0x3d, 0xd4, 0x53, 0x66, 0x2d, 0xd7, 0xa3, 0x72, 0xd0, + 0xf9, 0x53, 0x3a, 0xa4, 0x96, 0x5d, 0x13, 0x2f, 0xdb, 0x6b, 0xab, 0x0e, 0xdd, 0xf0, 0xda, 0xd4, + 0xb3, 0x9c, 0x5e, 0xaa, 0x23, 0x06, 0x86, 0xa3, 0xc8, 0xe8, 0x4d, 0x96, 0xf6, 0x7d, 0x77, 0xe8, + 0x99, 0x24, 0x8b, 0xbe, 0x05, 0xd4, 0x83, 0x6d, 0x84, 0xee, 0x9a, 0x6b, 0x1a, 0xb6, 0x58, 0x1c, + 0x4c, 0xb6, 0x89, 0x47, 0x1c, 0x93, 0x04, 0x94, 0x49, 0x40, 0x60, 0x05, 0xa6, 0xff, 0x46, 0x83, + 0x29, 0x39, 0x17, 0x67, 0x40, 0xd4, 0x5f, 0x8b, 0x13, 0xf5, 0xc7, 0x0a, 0xee, 0xd0, 0x6c, 0x96, + 0xfe, 0x5b, 0x0d, 0x16, 0x03, 0xd7, 0x5d, 0xa3, 0xdb, 0x30, 0x6c, 0xc3, 0x31, 0x89, 0x17, 0xc4, + 0xfa, 0x22, 0x94, 0xac, 0x81, 0x5c, 0x49, 0x90, 0x00, 0xa5, 0xd5, 0x16, 0x2e, 0x59, 0x03, 0x56, + 0x45, 0x77, 0x5c, 0x9f, 0x72, 0x36, 0x2f, 0x0e, 0x8a, 0xca, 0xeb, 0xbb, 0xb2, 0x1d, 0x2b, 0x0d, + 0xb4, 0x09, 0x95, 0x81, 0xeb, 0x51, 0x56, 0xb9, 0xca, 0x89, 0xf5, 0x3d, 0xc2, 0x6b, 0xb6, 0x6e, + 0x32, 0x10, 0xc3, 0x9d, 0xce, 0x60, 0xb0, 0x40, 0xd3, 0x7f, 0xa0, 0xc1, 0xc3, 0x19, 0xfe, 0x4b, + 0xd2, 0xd0, 0x85, 0x09, 0x4b, 0x08, 0x65, 0x7a, 0x79, 0xae, 0x58, 0xb7, 0x19, 0x53, 0x11, 0xa6, + 0xb6, 0x20, 0x85, 0x05, 0xd0, 0xfa, 0x2f, 0x35, 0xb8, 0x98, 0xf2, 0x97, 0xa7, 0x68, 0x16, 0xcf, + 0x92, 0x6d, 0xab, 0x14, 0xcd, 0xc2, 0x92, 0x4b, 0xd0, 0x6b, 0x50, 0xe5, 0x77, 0x44, 0xa6, 0x6b, + 0xcb, 0x09, 0xac, 0x07, 0x13, 0xd8, 0x92, 0xed, 0x0f, 0x0e, 0x96, 0xaf, 0x64, 0x9c, 0xb5, 0x03, + 0x31, 0x56, 0x00, 0x68, 0x19, 0x2a, 0xc4, 0xf3, 0x5c, 
0x4f, 0x26, 0xfb, 0x49, 0x36, 0x53, 0x77, + 0x58, 0x03, 0x16, 0xed, 0xfa, 0xaf, 0xc2, 0x20, 0x65, 0xd9, 0x97, 0xf9, 0xc7, 0x16, 0x27, 0x99, + 0x18, 0xd9, 0xd2, 0x61, 0x2e, 0x41, 0x43, 0xb8, 0x60, 0x25, 0xd2, 0xb5, 0xdc, 0x9d, 0xf5, 0x62, + 0xd3, 0xa8, 0xcc, 0x1a, 0x0b, 0x12, 0xfe, 0x42, 0x52, 0x82, 0x53, 0x5d, 0xe8, 0x04, 0x52, 0x5a, + 0xe8, 0x75, 0x18, 0xdb, 0xa1, 0x74, 0x90, 0xf1, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5, + 0xa3, 0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2b, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0xaf, 0xab, + 0xd1, 0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7, + 0xb4, 0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0x77, 0xa1, 0x4c, + 0xed, 0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d, + 0xcc, 0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 0xaf, 0x2b, 0x6c, 0x06, + 0xc3, 0xcd, 0xc7, 0x9e, 0x7c, 0x2c, 0x70, 0xf4, 0x1f, 0x6a, 0x30, 0x1d, 0xab, 0x16, 0xc8, 0x83, + 0xf3, 0x76, 0x64, 0xef, 0xc8, 0x79, 0x78, 0x76, 0xf4, 0x5d, 0x27, 0x37, 0xfd, 0x9c, 0xec, 0xf7, + 0x7c, 0x54, 0x86, 0x63, 0x7d, 0xe8, 0x06, 0x40, 0x38, 0x6c, 0xb6, 0x0f, 0x58, 0xf0, 0x8a, 0x0d, + 0x2f, 0xf7, 0x01, 0x8b, 0x69, 0x1f, 0x8b, 0x76, 0x74, 0x03, 0xc0, 0x27, 0xa6, 0x47, 0x68, 0x33, + 0x4c, 0x5c, 0xaa, 0x1c, 0xb7, 0x95, 0x04, 0x47, 0xb4, 0xf4, 0x5f, 0x94, 0x60, 0xba, 0x49, 0xe8, + 0x77, 0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71, + 0xf9, 0x32, 0xe6, 0x5d, 0x2e, 0x15, 0x78, 0x2b, 0x41, 0x05, 0x6e, 0x8c, 0x84, 0x7a, 0x34, 0x21, + 0xf8, 0x50, 0x83, 0xf9, 0x98, 0xfe, 0x9d, 0x30, 0xd7, 0xa8, 0xe4, 0xaf, 0x15, 0x4a, 0xfe, 0x31, + 0x18, 0x96, 0x30, 0xb3, 0x93, 0x3f, 0x5a, 0x83, 0x12, 0x75, 0xe5, 0xce, 0x18, 0x0d, 0x93, 0x10, + 0x2f, 0xac, 0x67, 0x1d, 0x17, 0x97, 0xa8, 0xab, 0xff, 0x51, 0x83, 0x85, 0x98, 0x56, 0x34, 0x5b, + 0x7e, 0x4e, 0x23, 0xc0, 0x30, 0xb6, 0xed, 0xb9, 0xfd, 0x13, 0x8f, 0x41, 0x2d, 0xf2, 0xcb, 0x9e, + 0xdb, 0xc7, 0x1c, 0x4b, 0xff, 0x48, 0x83, 0x8b, 0x31, 0xcd, 0x33, 0xe0, 0x24, 0xaf, 0xc7, 0x39, + 0xc9, 0xb5, 0x51, 0x06, 0x92, 0xc3, 0x4c, 0x3e, 0x2a, 0x25, 0x86, 0xc1, 0x06, 0x8c, 0xb6, 0x61, + 0x6a, 0xe0, 0x76, 0xdb, 0xa7, 0x70, 0xf9, 0x3b, 0xcb, 0xb8, 0x62, 0x2b, 0xc4, 0xc2, 0x51, 0x60, + 0x74, 0x0f, 0x2e, 0x32, 0xda, 0xe2, 0x0f, 0x0c, 0x93, 0xb4, 0x4f, 0xe1, 0x75, 0xd8, 0x43, 0xfc, + 0x76, 0x29, 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0x06, 0xfc, 0xec, 0x22, 0x37, 0xe9, + 0xb1, 0x04, 0x4f, 0x9c, 0x74, 0x44, 0xf9, 0x90, 0x0f, 0x38, 0xc0, 0xd0, 0xff, 0x92, 0x8c, 0x06, + 0x4e, 0x85, 0x5f, 0x89, 0x50, 0x0f, 0x79, 0x0f, 0x74, 0x32, 0xda, 0xd1, 0x94, 0x2c, 0xe7, 0xa4, + 0xac, 0xbd, 0x9a, 0xe0, 0x44, 0x5f, 0x82, 0x09, 0xe2, 0x74, 0xf9, 0x41, 0x40, 0xbc, 0x64, 0xe1, + 0xa3, 0xba, 0x23, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x54, 0x4e, 0x8c, 0x8a, 0x97, 0xf0, 0x77, 0x4e, + 0x2d, 0x38, 0xd4, 0x61, 0x22, 0x37, 0x40, 0xb6, 0x42, 0x6a, 0x29, 0x62, 0xfe, 0x6b, 0xa3, 0xc4, + 0x7c, 0xb4, 0xb6, 0xe6, 0x12, 0x4b, 0xf4, 0x2d, 0x18, 0x27, 0xa2, 0x0b, 0x51, 0xb1, 0x6f, 0x8e, + 0xd2, 0x45, 0x98, 0x7e, 0xc3, 0x94, 0x2d, 0xdb, 0x24, 0x2a, 0x7a, 0x89, 0xcd, 0x17, 0xd3, 0x65, + 0x47, 0x1e, 0xc1, 0xcc, 0x27, 0x1b, 0x8f, 0x88, 0x61, 0xab, 0xe6, 0x07, 0x07, 0xcb, 0x10, 0x3e, + 0xe2, 0xa8, 0x85, 0xfe, 0x3d, 0xb8, 0x94, 0x51, 0x22, 0x90, 0x19, 0x7b, 0x33, 0x24, 0x32, 0x66, + 0xbd, 0xd8, 0x32, 0x14, 0xbf, 0xe2, 0x7c, 0xbf, 0x04, 0x20, 0xdf, 0x45, 0x9d, 0xcd, 0x97, 0x55, + 0xa3, 0xdd, 0x0a, 0x86, 0xae, 0x9d, 0xda, 0xad, 0x60, 0x04, 0xf2, 0xe8, 0x52, 
0xfc, 0x8f, 0x12, + 0x5c, 0x0a, 0x95, 0x0b, 0xdf, 0x0a, 0x66, 0x98, 0xfc, 0xef, 0xeb, 0xaa, 0x62, 0x37, 0x75, 0xe1, + 0xd4, 0xfd, 0xe7, 0xdd, 0xd4, 0x85, 0xbe, 0xe5, 0x54, 0xda, 0x5f, 0x97, 0xa2, 0x03, 0x18, 0xf1, + 0xba, 0xe8, 0x14, 0x3e, 0x30, 0xfa, 0xc2, 0xdd, 0x38, 0xe9, 0x7f, 0x2e, 0xc3, 0x85, 0xe4, 0x6e, + 0x8c, 0xdd, 0x2a, 0x68, 0xc7, 0xde, 0x2a, 0xb4, 0x60, 0x6e, 0x7b, 0x68, 0xdb, 0xfb, 0x7c, 0x0c, + 0x91, 0xab, 0x05, 0x71, 0x1f, 0xf1, 0x7f, 0xd2, 0x72, 0xee, 0xe5, 0x0c, 0x1d, 0x9c, 0x69, 0x99, + 0xbe, 0x64, 0x18, 0xfb, 0x77, 0x2f, 0x19, 0x2a, 0x27, 0xb8, 0x64, 0xc8, 0xbe, 0xa7, 0x29, 0x9f, + 0xe8, 0x9e, 0xe6, 0x24, 0x37, 0x0c, 0x19, 0x49, 0xec, 0xd8, 0x52, 0xf2, 0x22, 0xcc, 0xc4, 0x6f, + 0xbd, 0xc4, 0x5a, 0x8a, 0x8b, 0x37, 0x79, 0xc7, 0x14, 0x59, 0x4b, 0xd1, 0x8e, 0x95, 0x86, 0x7e, + 0xa8, 0xc1, 0xe5, 0xec, 0xaf, 0x5b, 0x90, 0x0d, 0x33, 0x7d, 0xe3, 0x5e, 0xf4, 0x8b, 0x23, 0xed, + 0x84, 0x4c, 0x89, 0x5f, 0x77, 0xac, 0xc7, 0xb0, 0x70, 0x02, 0x1b, 0xbd, 0x05, 0xd5, 0xbe, 0x71, + 0xaf, 0x3d, 0xf4, 0x7a, 0xe4, 0xc4, 0x8c, 0x8c, 0x6f, 0xa3, 0x75, 0x89, 0x82, 0x15, 0x9e, 0xfe, + 0x99, 0x06, 0xf3, 0x39, 0x97, 0x18, 0xff, 0x45, 0xa3, 0x7c, 0xb7, 0x04, 0x95, 0xb6, 0x69, 0xd8, + 0xe4, 0x0c, 0x08, 0xc5, 0xab, 0x31, 0x42, 0x71, 0xdc, 0x57, 0xb2, 0xdc, 0xab, 0x5c, 0x2e, 0x81, + 0x13, 0x5c, 0xe2, 0x89, 0x42, 0x68, 0x47, 0xd3, 0x88, 0xe7, 0x60, 0x52, 0x75, 0x3a, 0x5a, 0x76, + 0xd3, 0x7f, 0x5e, 0x82, 0xa9, 0x48, 0x17, 0x23, 0xe6, 0xc6, 0xed, 0x58, 0x41, 0x28, 0x17, 0x78, + 0x83, 0x14, 0xe9, 0xab, 0x16, 0x94, 0x00, 0xf1, 0x95, 0x47, 0x78, 0xaf, 0x9f, 0xae, 0x0c, 0x2f, + 0xc2, 0x0c, 0x35, 0xbc, 0x1e, 0xa1, 0xea, 0xc8, 0x20, 0x5e, 0x9e, 0xaa, 0xcf, 0x8d, 0x3a, 0x31, + 0x29, 0x4e, 0x68, 0x2f, 0xbe, 0x00, 0xd3, 0xb1, 0xce, 0x46, 0xf9, 0x48, 0xa3, 0xb1, 0x72, 0xff, + 0xd3, 0xa5, 0x73, 0x1f, 0x7f, 0xba, 0x74, 0xee, 0x93, 0x4f, 0x97, 0xce, 0x7d, 0xff, 0x70, 0x49, + 0xbb, 0x7f, 0xb8, 0xa4, 0x7d, 0x7c, 0xb8, 0xa4, 0x7d, 0x72, 0xb8, 0xa4, 0xfd, 0xed, 0x70, 0x49, + 0xfb, 0xc9, 0x67, 0x4b, 0xe7, 0xde, 0x7a, 0xe4, 0xc8, 0xff, 0xd9, 0xf8, 0x57, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x39, 0x36, 0x95, 0x55, 0xec, 0x31, 0x00, 0x00, } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { @@ -2882,48 +2320,6 @@ func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3006,7 +2402,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *HostPortRange) Marshal() (dAtA []byte, 
err error) { +func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3016,26 +2412,34 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + if len(m.Except) > 0 { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *IDRange) Marshal() (dAtA []byte, err error) { +func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3045,100 +2449,34 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *IPBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Except) > 0 { - for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Except[iNdEx]) - copy(dAtA[i:], m.Except[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.CIDR) - copy(dAtA[i:], m.CIDR) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Ingress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - 
return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 { @@ -4001,7 +3339,7 @@ func (m *NetworkPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4011,16 +3349,26 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -4044,7 +3392,60 @@ func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4054,12 +3455,12 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func 
(m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4091,7 +3492,7 @@ func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4101,49 +3502,32 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.RuntimeClass != nil { - { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RunAsGroup != nil { + i-- + dAtA[i] = 0x1a + if m.Selector != nil { { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4151,63 +3535,40 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } + dAtA[i] = 0x12 } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i 
= encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } + return len(dAtA) - i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4215,167 +3576,148 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x32 } - i-- - dAtA[i] = 0x78 } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) i-- - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) i-- - dAtA[i] = 0x52 + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) i-- - dAtA[i] = 0x48 - i-- - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - 
i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - i-- - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.RequiredDropCapabilities) > 0 { - for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - 
dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { +func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4385,12 +3727,12 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4428,7 +3770,7 @@ func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4438,50 +3780,23 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4491,687 +3806,610 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := 
range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil + dAtA[offset] = uint8(v) + return base +} +func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + n += 1 + 
sovGenerated(uint64(m.TemplateGeneration)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return n } -func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil + return n } -func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + return n } -func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return n } -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n } -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n } -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], 
nil -} - -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 - } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } - return len(dAtA) - i, nil + return n } -func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *Scale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *ScaleSpec) Marshal() 
(dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressList) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *IngressLoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i -= len(m.TargetSelector) - copy(dAtA[i:], m.TargetSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i-- - dAtA[i] = 0x1a - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressLoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AllowedCSIDriver) Size() (n int) { +func (m *IngressPortStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *AllowedFlexVolume) Size() (n int) { +func (m *IngressRule) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Driver) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *AllowedHostPath) Size() (n int) { +func (m *IngressRuleValue) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.PathPrefix) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DaemonSet) Size() (n int) { +func (m *IngressSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.IngressClassName != nil { + l = len(*m.IngressClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DaemonSetCondition) Size() (n int) { +func (m *IngressStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) + l = m.LoadBalancer.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) + return n +} + +func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() + return n +} + +func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DaemonSetList) Size() (n int) { +func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 
+ l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { if m == nil { return 0 } @@ -5188,70 +4426,92 @@ func (m *DaemonSetList) Size() (n int) { return n } -func (m *DaemonSetSpec) Size() (n int) { +func (m *NetworkPolicyPeer) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Selector != nil { - l = m.Selector.Size() + if m.PodSelector != nil { + l = m.PodSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - n += 1 + sovGenerated(uint64(m.TemplateGeneration)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *DaemonSetStatus) Size() (n int) { +func (m *NetworkPolicyPort) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) - n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberReady)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberAvailable)) - n += 1 + sovGenerated(uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EndPort != nil { + n += 1 + sovGenerated(uint64(*m.EndPort)) + } + return n +} + +func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *DaemonSetUpdateStrategy) Size() (n int) { +func (m *NetworkPolicyStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *Deployment) Size() (n int) { +func (m *ReplicaSet) Size() (n int) { if m == nil { return 0 } @@ -5266,7 +4526,7 @@ func (m *Deployment) Size() (n int) { return n } -func (m *DeploymentCondition) Size() (n int) { +func (m *ReplicaSetCondition) Size() (n int) { if m == nil { return 0 } @@ -5276,18 +4536,16 @@ func (m *DeploymentCondition) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Status) n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DeploymentList) Size() (n int) { +func (m *ReplicaSetList) Size() (n int) { if m == nil { return 0 } @@ -5304,28 +4562,7 @@ func (m *DeploymentList) Size() (n int) { return n } -func (m *DeploymentRollback) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UpdatedAnnotations) > 0 { - for k, v := range m.UpdatedAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentSpec) Size() (n int) { +func (m *ReplicaSetSpec) Size() (n int) { if m == nil { return 0 } @@ -5340,151 +4577,75 @@ func (m *DeploymentSpec) Size() (n int) { } l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - n += 2 - if m.RollbackTo != nil { - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ProgressDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) - } return n } -func (m *DeploymentStatus) Size() (n int) { +func (m *ReplicaSetStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) if len(m.Conditions) > 0 { for _, e := range m.Conditions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } return n } -func (m *DeploymentStrategy) Size() (n int) { +func (m *RollbackConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 1 + sovGenerated(uint64(m.Revision)) return n } -func (m *FSGroupStrategyOptions) Size() (n int) { +func (m *RollingUpdateDaemonSet) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() n += 1 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *HTTPIngressRuleValue) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) return n } -func (m *IDRange) Size() (n int) { +func (m *RollingUpdateDeployment) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IPBlock) Size() (n int) { - if m == nil { - return 0 + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - l = len(m.CIDR) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Except) > 0 { - for _, s := range m.Except { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Ingress) Size() (n int) { +func (m *Scale) Size() (n int) { if m == nil { return 0 } @@ -5499,4006 +4660,729 @@ func (m *Ingress) Size() (n int) { return n } -func (m *IngressBackend) Size() (n int) { +func (m *ScaleSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 1 + sovGenerated(uint64(m.Replicas)) return n } -func (m *IngressList) Size() (n int) { +func (m *ScaleStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - return n -} - -func (m *IngressLoadBalancerIngress) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Hostname) + l = len(m.TargetSelector) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *IngressLoadBalancerStatus) Size() (n int) { - if m == nil { - return 0 +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *IngressPortStatus) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - if m.Error != nil { - l = len(*m.Error) - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *IngressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s } - -func (m *IngressRuleValue) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", 
this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } - -func (m *IngressSpec) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Backend != nil { - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.IngressClassName != nil { - l = len(*m.IngressClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *IngressStatus) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *IngressTLS) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentList) String() string { + if this == nil { + return "nil" } - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NetworkPolicy) Size() (n int) { - if m == nil { - return 0 + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyEgressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) } - if len(m.To) > 0 { - for _, e := range m.To { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) } - return n + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyIngressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" } - if len(m.From) > 0 { - for _, e := range m.From { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", 
this.UnavailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPeer) Size() (n int) { - if m == nil { - return 0 +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.PodSelector != nil { - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," } - if m.IPBlock != nil { - l = m.IPBlock.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPaths += "}" + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + repeatedStringForPaths + `,`, + `}`, + }, "") + return s +} +func (this *IPBlock) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPort) Size() (n int) { - if m == nil { - return 0 +func (this *Ingress) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Protocol != nil { - l = len(*m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.EndPort != nil { - n += 1 + sovGenerated(uint64(*m.EndPort)) + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), 
"IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicySpec) Size() (n int) { - if m == nil { - return 0 +func (this *IngressList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," } - if len(m.Egress) > 0 { - for _, e := range m.Egress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IngressLoadBalancerIngress) String() string { + if this == nil { + return "nil" } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts := "[]IngressPortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," } - return n + repeatedStringForPorts += "}" + s := strings.Join([]string{`&IngressLoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyStatus) Size() (n int) { - if m == nil { - return 0 +func (this *IngressLoadBalancerStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForIngress := "[]IngressLoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," } - return n + repeatedStringForIngress += "}" + s := strings.Join([]string{`&IngressLoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 +func (this *IngressPortStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&IngressPortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *IngressRule) String() string { + if this == nil { + return "nil" } - 
return n + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForRules += "}" + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, + `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.DefaultAllowPrivilegeEscalation != nil { - n += 2 + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" } - if m.AllowPrivilegeEscalation != nil { - n += 3 + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" } - if len(m.AllowedHostPaths) > 0 { - for _, e := range m.AllowedHostPaths { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), 
"ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" } - if len(m.AllowedFlexVolumes) > 0 { - for _, e := range m.AllowedFlexVolumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ReplicaSet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForTo += "}" + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetCondition) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetList) Size() (n int) { - if m == nil { - return 0 + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } 
- return n + repeatedStringForFrom += "}" + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, + `}`, + }, "") + return s } - -func (m *RollbackConfig) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Revision)) - return n + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, + `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, + `}`, + }, "") + return s } - -func (m *RollingUpdateDaemonSet) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", 
"NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," } - return n + repeatedStringForEgress += "}" + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, + `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, + `}`, + }, "") + return s } - -func (m *RollingUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *NetworkPolicyStatus) String() string { + if this == nil { + return "nil" } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NetworkPolicyStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } - -func (m *RunAsGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *RunAsUserStrategyOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RuntimeClassStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SELinuxStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - return n -} - -func (m *Scale) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.TargetSelector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllowedCSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedCSIDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedFlexVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedHostPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedHostPath{`, - `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]DaemonSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := 
strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]DaemonSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&DaemonSetStatus{`, - `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, - `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, - `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, - `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, - `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, - `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetUpdateStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Deployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Deployment{" + repeatedStringForItems := "[]ReplicaSet{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentRollback) String() string { - if this == nil { - return "nil" - } - keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) - for k := range this.UpdatedAnnotations { - keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - mapStringForUpdatedAnnotations := "map[string]string{" - for _, k := range keysForUpdatedAnnotations { - mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) - } - mapStringForUpdatedAnnotations += "}" - s := strings.Join([]string{`&DeploymentRollback{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, - `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, - `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]DeploymentCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&DeploymentStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `UpdatedReplicas:` + 
fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FSGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&FSGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" - } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," - } - repeatedStringForPaths += "}" - s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + repeatedStringForPaths + `,`, - `}`, - }, "") - return s -} -func (this *HostPortRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostPortRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IDRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IDRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IPBlock) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IPBlock{`, - `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, - `Except:` + fmt.Sprintf("%v", this.Except) + `,`, - `}`, - }, "") - return s -} -func (this *Ingress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressBackend) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&IngressBackend{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Ingress{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *IngressLoadBalancerIngress) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]IngressPortStatus{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - s := strings.Join([]string{`&IngressLoadBalancerIngress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `Ports:` + repeatedStringForPorts + `,`, - `}`, - }, "") - return s -} -func (this *IngressLoadBalancerStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForIngress := "[]IngressLoadBalancerIngress{" - for _, f := range this.Ingress { - repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," - } - repeatedStringForIngress += "}" - s := strings.Join([]string{`&IngressLoadBalancerStatus{`, - `Ingress:` + repeatedStringForIngress + `,`, - `}`, - }, "") - return s -} -func (this *IngressPortStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressPortStatus{`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `Error:` + valueToStringGenerated(this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRule{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRuleValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForTLS := "[]IngressTLS{" - for _, f := range this.TLS { - repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," - } - repeatedStringForTLS += "}" - repeatedStringForRules := "[]IngressRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) 
+ "," - } - repeatedStringForRules += "}" - s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + repeatedStringForTLS + `,`, - `Rules:` + repeatedStringForRules + `,`, - `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, - `}`, - }, "") - return s -} -func (this *IngressStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressTLS) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyEgressRule) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForTo := "[]NetworkPolicyPeer{" - for _, f := range this.To { - repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForTo += "}" - s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `To:` + repeatedStringForTo + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyIngressRule) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForFrom := "[]NetworkPolicyPeer{" - for _, f := range this.From { - repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForFrom += "}" - s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `From:` + repeatedStringForFrom + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]NetworkPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", 
"v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPeer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPort{`, - `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, - `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicySpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForIngress := "[]NetworkPolicyIngressRule{" - for _, f := range this.Ingress { - repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForIngress += "}" - repeatedStringForEgress := "[]NetworkPolicyEgressRule{" - for _, f := range this.Egress { - repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForEgress += "}" - s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + repeatedStringForIngress + `,`, - `Egress:` + repeatedStringForEgress + `,`, - `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&NetworkPolicyStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSecurityPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicySpec) 
String() string { - if this == nil { - return "nil" - } - repeatedStringForHostPorts := "[]HostPortRange{" - for _, f := range this.HostPorts { - repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," - } - repeatedStringForHostPorts += "}" - repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" - for _, f := range this.AllowedHostPaths { - repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedHostPaths += "}" - repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" - for _, f := range this.AllowedFlexVolumes { - repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedFlexVolumes += "}" - repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" - for _, f := range this.AllowedCSIDrivers { - repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedCSIDrivers += "}" - s := strings.Join([]string{`&PodSecurityPolicySpec{`, - `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, - `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, - `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, - `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, - `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + repeatedStringForHostPorts + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, - `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, - `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, - `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, - `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, - `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, - `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, - `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, - `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, - `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, - `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSet) String() string { - if 
this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ReplicaSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]ReplicaSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ReplicaSetStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *RollbackConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollbackConfig{`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", 
this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RunAsGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Scale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleStatus) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) 
+ `,`, - `Selector:` + mapStringForSelector + `,`, - `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - 
switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) - } - m.TemplateGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TemplateGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType 
== 4 { - return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) - } - m.CurrentNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) - } - m.NumberMisscheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberMisscheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) - } - m.DesiredNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) - } - m.NumberReady = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberReady |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) - } - m.UpdatedNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) - } - m.NumberAvailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberAvailable |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) - } - m.NumberUnavailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberUnavailable |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong 
wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DaemonSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDaemonSet{} - } - if err := 
m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentCondition) Unmarshal(dAtA []byte) error 
{ - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdatedAnnotations == nil { - m.UpdatedAnnotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return 
io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.UpdatedAnnotations[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated 
- } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollbackTo == nil { - m.RollbackTo = &RollbackConfig{} - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressDeadlineSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" } - return nil + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } -func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", 
this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9521,55 +5405,17 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - m.UpdatedReplicas = 0 + var msglen int for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9579,52 +5425,28 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.UnavailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - case 6: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9651,16 +5473,15 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - m.ReadyReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9670,31 +5491,25 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.CollisionCount = &v + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9716,7 +5531,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9739,10 +5554,10 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -9775,11 +5590,43 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9806,13 +5653,74 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9834,7 +5742,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } return nil } -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9857,17 +5765,17 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9877,27 +5785,28 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9924,8 +5833,8 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9950,7 +5859,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9973,17 +5882,17 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } - var 
stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9993,27 +5902,31 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10040,96 +5953,13 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10156,66 +5986,34 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := 
m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPortRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) } - m.Min = 0 + m.TemplateGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10225,16 +6023,16 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int32(b&0x7F) << shift + m.TemplateGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 2: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) } - m.Max = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10244,11 +6042,12 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int32(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } + m.RevisionHistoryLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10270,7 +6069,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } return nil } -func (m *IDRange) Unmarshal(dAtA []byte) error { +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10293,17 +6092,17 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) } - m.Min = 0 + m.CurrentNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10313,16 +6112,16 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int64(b&0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) } - m.Max = 0 + m.NumberMisscheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10332,66 +6131,73 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int64(b&0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberReady |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) } - var stringLen uint64 + m.UpdatedNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { 
return ErrIntOverflowGenerated @@ -10401,29 +6207,74 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberAvailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) } - m.CIDR = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10433,23 +6284,25 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -10472,7 +6325,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10495,17 +6348,17 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", 
fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10515,28 +6368,27 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10563,40 +6415,10 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10621,7 +6443,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(dAtA []byte) error { +func (m *Deployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10644,17 +6466,17 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10664,27 +6486,28 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10711,13 +6534,13 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10744,10 +6567,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10772,7 +6592,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10795,17 +6615,17 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10815,30 +6635,29 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var msglen int + var stringLen 
uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10848,79 +6667,27 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10948,11 +6715,11 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10980,11 +6747,11 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11001,74 +6768,23 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, IngressPortStatus{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if msglen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11095,8 +6811,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11121,7 +6836,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { +func (m *DeploymentList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11144,36 +6859,17 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11183,29 +6879,30 @@ func (m 
*IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11215,24 +6912,25 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Error = &s + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11255,7 +6953,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRule) Unmarshal(dAtA []byte) error { +func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11278,15 +6976,15 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11314,11 +7012,11 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11345,63 +7043,107 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11428,10 +7170,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = 
&HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11456,7 +7195,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11479,15 +7218,35 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11514,16 +7273,16 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backend == nil { - m.Backend = &IngressBackend{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11550,14 +7309,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11584,16 +7342,74 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds 
|= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11603,25 +7419,48 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11643,7 +7482,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11666,15 +7505,110 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11701,10 +7635,50 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11726,7 +7700,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11749,15 +7723,15 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group 
for non-group") + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11785,13 +7759,13 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11801,23 +7775,27 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11840,7 +7818,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11863,17 +7841,17 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11883,28 +7861,27 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + 
m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11931,15 +7908,15 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11949,24 +7926,24 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s iNdEx = postIndex default: iNdEx = preIndex @@ -11989,7 +7966,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12012,49 +7989,15 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12081,8 +8024,8 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.To = append(m.To, NetworkPolicyPeer{}) - 
if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12107,7 +8050,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { +func (m *IPBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12130,17 +8073,17 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12150,31 +8093,29 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12184,25 +8125,23 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -12225,7 +8164,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { +func (m *Ingress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12248,15 +8187,15 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
NetworkPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12283,13 +8222,13 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12316,8 +8255,40 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12342,7 +8313,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { +func (m *IngressBackend) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12365,17 +8336,17 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12385,31 +8356,27 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen 
< 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodSelector == nil { - m.PodSelector = &v1.LabelSelector{} - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ServiceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12436,16 +8403,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12472,10 +8436,10 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.IPBlock == nil { - m.IPBlock = &IPBlock{} + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} } - if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12500,7 +8464,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12523,17 +8487,17 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12543,28 +8507,28 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) - m.Protocol = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12591,33 +8555,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Port == nil { - m.Port = &intstr.IntOrString{} - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EndPort = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12639,7 +8581,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12662,17 +8604,17 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12682,30 +8624,29 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12715,29 +8656,27 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 
{ return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12764,16 +8703,66 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) - if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12783,23 +8772,25 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12822,7 +8813,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12845,17 +8836,36 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 
0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12865,25 +8875,56 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex default: iNdEx = preIndex @@ -12906,7 +8947,7 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { +func (m *IngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12929,17 +8970,17 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } - var 
msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12949,28 +8990,27 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12997,7 +9037,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13022,7 +9062,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13045,15 +9085,15 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13080,41 +9120,10 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13139,7 +9148,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } return 
nil } -func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13162,37 +9171,17 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v != 0) - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13202,29 +9191,33 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13234,29 +9227,31 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - var stringLen uint64 + var msglen int for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13266,27 +9261,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13314,31 +9311,62 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.HostNetwork = bool(v != 0) - case 7: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13365,16 +9393,65 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13384,37 +9461,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - m.HostIPC = bool(v != 0) - case 10: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13424,28 +9493,77 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 11: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13472,13 +9590,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13505,13 +9623,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 13: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13538,75 +9656,63 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.ReadOnlyRootFilesystem = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { 
+ preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - b := bool(v != 0) - m.DefaultAllowPrivilegeEscalation = &b - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13633,14 +9739,14 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13667,80 +9773,66 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 21: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13750,27 +9842,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 22: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13797,16 +9891,64 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13833,14 +9975,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13867,10 +10008,8 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13895,7 +10034,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSet) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13918,15 +10057,15 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13953,13 +10092,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.PodSelector == nil { + m.PodSelector = &v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13986,13 +10128,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14019,7 +10164,10 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14044,7 +10192,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14067,15 +10215,15 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14103,13 +10251,120 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EndPort = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14119,27 +10374,28 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14166,15 +10422,16 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14184,27 +10441,29 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return 
ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14232,7 +10491,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -14255,7 +10514,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14278,48 +10537,15 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14346,8 +10572,8 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ReplicaSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14372,7 +10598,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14395,17 +10621,17 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSet: wiretype end group 
for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14415,15 +10641,28 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14450,16 +10689,13 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14486,29 +10722,10 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14530,7 +10747,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14553,17 +10770,17 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Replicas = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14573,35 +10790,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.FullyLabeledReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FullyLabeledReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.ObservedGeneration = 0 + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14611,35 +10822,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } - m.AvailableReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14649,16 +10854,30 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 6: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14668,81 +10887,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ReplicaSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollbackConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - m.Revision = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14752,11 +10919,24 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14778,7 +10958,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14801,15 +10981,15 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14836,16 +11016,13 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14872,10 +11049,8 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14900,7 +11075,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14923,15 +11098,35 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14958,16 +11153,16 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14994,13 +11189,29 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15022,7 +11233,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15045,17 +11256,74 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } - var stringLen uint64 + m.ReadyReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15065,27 +11333,33 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break 
} } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15112,8 +11386,8 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15138,7 +11412,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15161,49 +11435,17 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } - var msglen int + m.Revision = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15213,26 +11455,11 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } 
- postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15254,7 +11481,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15277,17 +11504,17 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15297,29 +11524,33 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15329,24 +11560,27 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15369,7 +11603,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } 
return nil } -func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15392,17 +11626,17 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15412,27 +11646,31 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15459,10 +11697,10 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15933,122 +12171,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 0509bc3d..3ab6a093 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -30,37 +30,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/extensions/v1beta1"; -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} - // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for // more information. // DaemonSet represents the configuration of a daemon set. 
@@ -398,19 +367,6 @@ message DeploymentStrategy { optional RollingUpdateDeployment rollingUpdate = 2; } -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { @@ -453,27 +409,6 @@ message HTTPIngressRuleValue { repeated HTTPIngressPath paths = 1; } -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. - optional int64 max = 2; -} - // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. // IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs @@ -875,164 +810,6 @@ message NetworkPolicyStatus { repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; } -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. 
You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is an allowlist of Flexvolumes. 
Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; -} - // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // more information. // ReplicaSet ensures that a specified number of pod replicas are running at any given time. @@ -1227,57 +1004,6 @@ message RollingUpdateDeployment { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. 
Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; -} - // represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. @@ -1305,7 +1031,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic map selector = 2; @@ -1320,16 +1046,3 @@ message ScaleStatus { optional string targetSelector = 3; } -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. 
- // +optional - repeated IDRange ranges = 2; -} - diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go index c69eff0b..d58908ed 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -54,8 +54,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &ReplicaSet{}, &ReplicaSetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, &NetworkPolicy{}, &NetworkPolicyList{}, ) diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index be1b95e6..c0ac6fa2 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -35,7 +35,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -1021,389 +1021,6 @@ type ReplicaSetCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicy - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. 
These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. 
- // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. 
-type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// FSType gives strong typing to different file systems that are used by volumes. -// Deprecated: use FSType from policy API Group instead. -type FSType string - -const ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - CSI FSType = "csi" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -// Deprecated: use SELinuxStrategy from policy API Group instead. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. 
- // Deprecated: use SELinuxStrategyMustRunAs from policy API Group instead. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - // Deprecated: use SELinuxStrategyRunAsAny from policy API Group instead. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -// Deprecated: use RunAsUserStrategy from policy API Group instead. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. - // Deprecated: use RunAsUserStrategyMustRunAs from policy API Group instead. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. - // Deprecated: use RunAsUserStrategyMustRunAsNonRoot from policy API Group instead. - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - // Deprecated: use RunAsUserStrategyRunAsAny from policy API Group instead. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -// Deprecated: use RunAsGroupStrategy from policy API Group instead. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. 
- RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -// Deprecated: use FSGroupStrategyType from policy API Group instead. -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. - // Deprecated: use FSGroupStrategyMustRunAs from policy API Group instead. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - // Deprecated: use FSGroupStrategyRunAsAny from policy API Group instead. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -// Deprecated: use SupplementalGroupsStrategyType from policy API Group instead. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use SupplementalGroupsStrategyMustRunAs from policy API Group instead. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use SupplementalGroupsStrategyRunAsAny from policy API Group instead. 
- SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicyList - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. - Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 302eb953..39aaf485 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -24,37 +24,9 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. 
Deprecated: use AllowedFlexVolume from policy API Group instead.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined. Deprecated: use AllowedHostPath from policy API Group instead.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -220,16 +192,6 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { return map_DeploymentStrategy } -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use FSGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", @@ -250,26 +212,6 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined. Deprecated: use HostPortRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs. 
Deprecated: use IDRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - var map_IPBlock = map[string]string{ "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", @@ -476,58 +418,6 @@ func (NetworkPolicyStatus) SwaggerDoc() map[string]string { return map_NetworkPolicyStatus } -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced. Deprecated: use PodSecurityPolicySpec from policy API Group instead.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. 
This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -617,46 +507,6 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. 
Deprecated: use SELinuxStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", @@ -680,7 +530,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } @@ -688,14 +538,4 @@ func (ScaleStatus) SwaggerDoc() map[string]string { return map_ScaleStatus } -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 671aa2d9..b6e92729 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -28,54 +28,6 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { *out = *in @@ -435,27 +387,6 @@ func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in @@ -501,38 +432,6 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. -func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in @@ -1062,161 +961,6 @@ func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. -func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. -func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { *out = *in @@ -1413,95 +1157,6 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. -func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. -func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Scale) DeepCopyInto(out *Scale) { *out = *in @@ -1568,24 +1223,3 @@ func (in *ScaleStatus) DeepCopy() *ScaleStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. 
-func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { - if in == nil { - return nil - } - out := new(SupplementalGroupsStrategyOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go index 963aaffb..5c935422 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go @@ -235,54 +235,6 @@ func (in *NetworkPolicyList) APILifecycleRemoved() (major, minor int) { return 1, 16 } -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) { - return 1, 2 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) { - return 1, 11 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PodSecurityPolicy) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicy"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 16 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) { - return 1, 2 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) { - return 1, 11 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
-func (in *PodSecurityPolicyList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicyList"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 16 -} - // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go index ac6f7179..c95999fa 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_FlowDistinguisherMethod = map[string]string{ diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index fe4f8022..fc08e128 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_FlowDistinguisherMethod = map[string]string{ diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go index 4bedcce3..b2eff7f9 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_FlowDistinguisherMethod = map[string]string{ diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go index e2bd27e8..728252c0 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta3 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_FlowDistinguisherMethod = map[string]string{ diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index 8196a14b..ed194a89 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -33,14 +33,14 @@ option go_package = "k8s.io/api/networking/v1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -56,7 +56,7 @@ message HTTPIngressPath { // Implementations are required to support all path types. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -67,7 +67,7 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic repeated HTTPIngressPath paths = 1; } @@ -76,13 +76,13 @@ message HTTPIngressRuleValue { // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. 
message IPBlock { - // CIDR is a string representing the IP Block + // cidr is a string representing the IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" optional string cidr = 1; - // Except is a slice of CIDRs that should not be included within an IP Block + // except is a slice of CIDRs that should not be included within an IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" - // Except values will be rejected if they are outside the CIDR range + // Except values will be rejected if they are outside the cidr range // +optional repeated string except = 2; } @@ -97,12 +97,12 @@ message Ingress { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -110,12 +110,12 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Service references a Service as a Backend. + // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional optional IngressServiceBackend service = 4; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -134,7 +134,7 @@ message IngressClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -146,31 +146,31 @@ message IngressClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". 
// +optional optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -179,15 +179,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -201,21 +201,21 @@ message IngressList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. message IngressLoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional optional string ip = 1; - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional optional string hostname = 2; - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional repeated IngressPortStatus ports = 4; @@ -223,21 +223,21 @@ message IngressLoadBalancerIngress { // IngressLoadBalancerStatus represents the status of a load-balancer. message IngressLoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional repeated IngressLoadBalancerIngress ingress = 1; } // IngressPortStatus represents the error condition of a service port message IngressPortStatus { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. optional int32 port = 1; - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" optional string protocol = 2; - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -256,7 +256,7 @@ message IngressPortStatus { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. 
message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -269,14 +269,14 @@ message IngressRule { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional optional string host = 1; @@ -301,18 +301,18 @@ message IngressRuleValue { // IngressServiceBackend references a Kubernetes Service as a Backend. message IngressServiceBackend { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. optional string name = 1; - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. optional ServiceBackendPort port = 2; } // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of an IngressClass cluster resource. Ingress + // ingressClassName is the name of an IngressClass cluster resource. Ingress // controller implementations use this field to know whether they should be // serving this Ingress resource, by a transitive connection // (controller -> IngressClass -> Ingress resource). Although the @@ -325,24 +325,24 @@ message IngressSpec { // +optional optional string ingressClassName = 4; - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional optional IngressBackend defaultBackend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. 
If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional repeated IngressRule rules = 3; @@ -350,14 +350,14 @@ message IngressSpec { // IngressStatus describe the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional optional IngressLoadBalancerStatus loadBalancer = 1; } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. @@ -365,11 +365,11 @@ message IngressTLS { // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional optional string secretName = 2; } @@ -381,11 +381,11 @@ message NetworkPolicy { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional optional NetworkPolicySpec spec = 2; - // Status is the current state of the NetworkPolicy. + // status represents the current state of the NetworkPolicy. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NetworkPolicyStatus status = 3; @@ -395,7 +395,7 @@ message NetworkPolicy { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 message NetworkPolicyEgressRule { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows @@ -403,7 +403,7 @@ message NetworkPolicyEgressRule { // +optional repeated NetworkPolicyPort ports = 1; - // List of destinations for outgoing traffic of pods selected for this rule. 
+ // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule @@ -415,15 +415,15 @@ message NetworkPolicyEgressRule { // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. message NetworkPolicyIngressRule { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional repeated NetworkPolicyPort ports = 1; - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule @@ -439,32 +439,32 @@ message NetworkPolicyList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated NetworkPolicy items = 2; } // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed message NetworkPolicyPeer { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. 
+ // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional optional IPBlock ipBlock = 3; @@ -472,19 +472,19 @@ message NetworkPolicyPeer { // NetworkPolicyPort describes a port to allow traffic on message NetworkPolicyPort { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional optional string protocol = 1; - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. @@ -494,16 +494,16 @@ message NetworkPolicyPort { // NetworkPolicySpec provides the specification of a NetworkPolicy message NetworkPolicySpec { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. 
If @@ -512,8 +512,8 @@ message NetworkPolicySpec { // +optional repeated NetworkPolicyIngressRule ingress = 2; - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves @@ -522,23 +522,23 @@ message NetworkPolicySpec { // +optional repeated NetworkPolicyEgressRule egress = 3; - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional repeated string policyTypes = 4; } -// NetworkPolicyStatus describe the current state of the NetworkPolicy. +// NetworkPolicyStatus describes the current state of the NetworkPolicy. message NetworkPolicyStatus { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. + // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. // Current service state // +optional // +patchMergeKey=type @@ -550,12 +550,12 @@ message NetworkPolicyStatus { // ServiceBackendPort is the service port being referenced. message ServiceBackendPort { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional optional string name = 1; - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". // +optional optional int32 number = 2; diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index a9deb900..fa7cf1bd 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -28,16 +28,17 @@ import ( // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the NetworkPolicy. + // status represents the current state of the NetworkPolicy. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -57,16 +58,16 @@ const ( // NetworkPolicySpec provides the specification of a NetworkPolicy type NetworkPolicySpec struct { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If @@ -75,8 +76,8 @@ type NetworkPolicySpec struct { // +optional Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves @@ -85,15 +86,15 @@ type NetworkPolicySpec struct { // +optional Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. 
- // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` @@ -102,15 +103,15 @@ type NetworkPolicySpec struct { // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule @@ -123,7 +124,7 @@ type NetworkPolicyIngressRule struct { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 type NetworkPolicyEgressRule struct { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows @@ -131,7 +132,7 @@ type NetworkPolicyEgressRule struct { // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of destinations for outgoing traffic of pods selected for this rule. + // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. 
If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule @@ -142,19 +143,19 @@ type NetworkPolicyEgressRule struct { // NetworkPolicyPort describes a port to allow traffic on type NetworkPolicyPort struct { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. @@ -166,12 +167,13 @@ type NetworkPolicyPort struct { // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. type IPBlock struct { - // CIDR is a string representing the IP Block + // cidr is a string representing the IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` - // Except is a slice of CIDRs that should not be included within an IP Block + + // except is a slice of CIDRs that should not be included within an IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" - // Except values will be rejected if they are outside the CIDR range + // Except values will be rejected if they are outside the cidr range // +optional Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } @@ -179,25 +181,25 @@ type IPBlock struct { // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed type NetworkPolicyPeer struct { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - // Selects Namespaces using cluster-scoped labels. 
This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` @@ -233,9 +235,9 @@ const ( NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported" ) -// NetworkPolicyStatus describe the current state of the NetworkPolicy. +// NetworkPolicyStatus describes the current state of the NetworkPolicy. type NetworkPolicyStatus struct { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. + // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. // Current service state // +optional // +patchMergeKey=type @@ -250,12 +252,13 @@ type NetworkPolicyStatus struct { // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -268,17 +271,18 @@ type NetworkPolicyList struct { // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -289,18 +293,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of an IngressClass cluster resource. Ingress + // ingressClassName is the name of an IngressClass cluster resource. Ingress // controller implementations use this field to know whether they should be // serving this Ingress resource, by a transitive connection // (controller -> IngressClass -> Ingress resource). Although the @@ -313,72 +318,73 @@ type IngressSpec struct { // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional DefaultBackend *IngressBackend `json:"defaultBackend,omitempty" protobuf:"bytes,1,opt,name=defaultBackend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +listType=atomic // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. 
If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` } // IngressStatus describe the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` } // IngressLoadBalancerStatus represents the status of a load-balancer. type IngressLoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. type IngressLoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` @@ -386,14 +392,14 @@ type IngressLoadBalancerIngress struct { // IngressPortStatus represents the error condition of a service port type IngressPortStatus struct { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -412,7 +418,7 @@ type IngressPortStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -425,14 +431,14 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. 
// - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` @@ -460,7 +466,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` } @@ -499,14 +505,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -522,19 +528,19 @@ type HTTPIngressPath struct { // Implementations are required to support all path types. PathType *PathType `json:"pathType" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Service references a Service as a Backend. + // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional Service *IngressServiceBackend `json:"service,omitempty" protobuf:"bytes,4,opt,name=service"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". 
@@ -544,23 +550,23 @@ type IngressBackend struct { // IngressServiceBackend references a Kubernetes Service as a Backend. type IngressServiceBackend struct { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. Port ServiceBackendPort `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } // ServiceBackendPort is the service port being referenced. type ServiceBackendPort struct { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". // +optional Number int32 `json:"number,omitempty" protobuf:"bytes,2,opt,name=number"` @@ -577,12 +583,13 @@ type ServiceBackendPort struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -590,15 +597,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -617,20 +624,24 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. 
// +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". // +optional Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -642,10 +653,11 @@ type IngressClassParametersReference struct { // IngressClassList is a collection of IngressClasses. type IngressClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 94ccf964..91161d5c 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. 
Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -49,8 +49,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_IPBlock = map[string]string{ "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the CIDR range", + "cidr": "cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", + "except": "except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the cidr range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -60,8 +60,8 @@ func (IPBlock) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. 
An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -70,8 +70,8 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "service": "Service references a Service as a Backend. This is a mutually exclusive setting with \"Resource\".", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", + "service": "service references a service as a backend. This is a mutually exclusive setting with \"Resource\".", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -81,7 +81,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -91,7 +91,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -100,11 +100,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -113,8 +113,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". 
This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -124,7 +124,7 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { @@ -133,9 +133,9 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressLoadBalancerIngress = map[string]string{ "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", - "ip": "IP is set for load-balancer ingress points that are IP based.", - "hostname": "Hostname is set for load-balancer ingress points that are DNS based.", - "ports": "Ports provides information about the ports exposed by this LoadBalancer.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", } func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { @@ -144,7 +144,7 @@ func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { var map_IngressLoadBalancerStatus = map[string]string{ "": "IngressLoadBalancerStatus represents the status of a load-balancer.", - "ingress": "Ingress is a list containing ingress points for the load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", } func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { @@ -153,9 +153,9 @@ func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { var map_IngressPortStatus = map[string]string{ "": "IngressPortStatus represents the error condition of a service port", - "port": "Port is the port number of the ingress port.", - "protocol": "Protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", - "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", } func (IngressPortStatus) SwaggerDoc() map[string]string { @@ -164,7 +164,7 @@ func (IngressPortStatus) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. 
Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. If host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -181,8 +181,8 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressServiceBackend = map[string]string{ "": "IngressServiceBackend references a Kubernetes Service as a Backend.", - "name": "Name is the referenced service. The service must exist in the same namespace as the Ingress object.", - "port": "Port of the referenced service. A port name or port number is required for a IngressServiceBackend.", + "name": "name is the referenced service. The service must exist in the same namespace as the Ingress object.", + "port": "port of the referenced service. 
A port name or port number is required for a IngressServiceBackend.", } func (IngressServiceBackend) SwaggerDoc() map[string]string { @@ -191,10 +191,10 @@ func (IngressServiceBackend) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", - "defaultBackend": "DefaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", + "defaultBackend": "defaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. 
If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -203,7 +203,7 @@ func (IngressSpec) SwaggerDoc() map[string]string { var map_IngressStatus = map[string]string{ "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -211,9 +211,9 @@ func (IngressStatus) SwaggerDoc() map[string]string { } var map_IngressTLS = map[string]string{ - "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "": "IngressTLS describes the transport layer security associated with an ingress.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the \"Host\" header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { @@ -223,8 +223,8 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior for this NetworkPolicy.", - "status": "Status is the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents the specification of the desired behavior for this NetworkPolicy.", + "status": "status represents the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (NetworkPolicy) SwaggerDoc() map[string]string { @@ -233,8 +233,8 @@ func (NetworkPolicy) SwaggerDoc() map[string]string { var map_NetworkPolicyEgressRule = map[string]string{ "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", - "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. 
If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", + "ports": "ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "to": "to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", } func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { @@ -243,8 +243,8 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", - "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "ports": "ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). 
If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -254,7 +254,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "NetworkPolicyList is a list of NetworkPolicy objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (NetworkPolicyList) SwaggerDoc() map[string]string { @@ -263,9 +263,9 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string { var map_NetworkPolicyPeer = map[string]string{ "": "NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed", - "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", - "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", - "ipBlock": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", + "podSelector": "podSelector is a label selector which selects pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the pods matching podSelector in the policy's own namespace.", + "namespaceSelector": "namespaceSelector selects namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf podSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the namespaces selected by namespaceSelector. Otherwise it selects all pods in the namespaces selected by namespaceSelector.", + "ipBlock": "ipBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", } func (NetworkPolicyPeer) SwaggerDoc() map[string]string { @@ -274,9 +274,9 @@ func (NetworkPolicyPeer) SwaggerDoc() map[string]string { var map_NetworkPolicyPort = map[string]string{ "": "NetworkPolicyPort describes a port to allow traffic on", - "protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", - "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. 
If present, only traffic on the specified protocol AND port will be matched.", - "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", + "protocol": "protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "port represents the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", + "endPort": "endPort indicates that the range of ports from port to endPort if set, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", } func (NetworkPolicyPort) SwaggerDoc() map[string]string { @@ -285,10 +285,10 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { var map_NetworkPolicySpec = map[string]string{ "": "NetworkPolicySpec provides the specification of a NetworkPolicy", - "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", - "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", - "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. 
Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "ingress": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", + "egress": "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { @@ -296,8 +296,8 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { } var map_NetworkPolicyStatus = map[string]string{ - "": "NetworkPolicyStatus describe the current state of the NetworkPolicy.", - "conditions": "Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state", + "": "NetworkPolicyStatus describes the current state of the NetworkPolicy.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state", } func (NetworkPolicyStatus) SwaggerDoc() map[string]string { @@ -306,8 +306,8 @@ func (NetworkPolicyStatus) SwaggerDoc() map[string]string { var map_ServiceBackendPort = map[string]string{ "": "ServiceBackendPort is the service port being referenced.", - "name": "Name is the name of the port on the Service. 
This is a mutually exclusive setting with \"Number\".", - "number": "Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", + "name": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", + "number": "number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", } func (ServiceBackendPort) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go index 48d401db..f54d1f82 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -31,6 +31,8 @@ import ( math_bits "math/bits" reflect "reflect" strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. @@ -128,10 +130,126 @@ func (m *ClusterCIDRSpec) XXX_DiscardUnknown() { var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{3} +} +func (m *IPAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) +} +func (m *IPAddress) XXX_Size() int { + return m.Size() +} +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddress proto.InternalMessageInfo + +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{4} +} +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) +} +func (m *IPAddressList) XXX_Size() int { + return m.Size() +} +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo + +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{5} +} +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) +} +func (m *IPAddressSpec) XXX_Size() int { + return m.Size() +} +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo 
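The IPAddress, IPAddressList, IPAddressSpec, and ParentReference messages added above follow the usual gogo-protobuf layout: each type gets Reset/String/Marshal/Unmarshal plumbing, and every field is written as a varint-length-prefixed record through encodeVarintGenerated. A minimal round-trip sketch, assuming the vendored k8s.io/api/networking/v1alpha1 import path, assuming ParentReference.UID is types.UID from the newly imported k8s.io/apimachinery/pkg/types, and using purely hypothetical field values:

package main

import (
	"fmt"

	v1alpha1 "k8s.io/api/networking/v1alpha1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// Build an IPAddressSpec whose ParentRef points at a hypothetical Service.
	spec := v1alpha1.IPAddressSpec{
		ParentRef: &v1alpha1.ParentReference{
			Group:     "", // core API group
			Resource:  "services",
			Namespace: "default",
			Name:      "kubernetes",
			UID:       types.UID("00000000-0000-0000-0000-000000000000"), // assumed field type
		},
	}

	// Marshal/Unmarshal are the generated methods added for these types in this file.
	raw, err := spec.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded v1alpha1.IPAddressSpec
	if err := decoded.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(decoded.ParentRef.Resource) // prints "services"
}
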
+ +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{6} +} +func (m *ParentReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) +} +func (m *ParentReference) XXX_Size() int { + return m.Size() +} +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ParentReference proto.InternalMessageInfo + func init() { proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR") proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList") proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec") + proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") + proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") + proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") + proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference") } func init() { @@ -139,39 +257,51 @@ func init() { } var fileDescriptor_c1b7ac8d7d97acec = []byte{ - // 506 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4f, 0x8f, 0xd2, 0x40, - 0x18, 0xc6, 0xe9, 0x2e, 0x24, 0x6b, 0xc1, 0xb0, 0xe9, 0x45, 0xc2, 0x61, 0x20, 0x9c, 0x48, 0x8c, - 0x33, 0xb2, 0x21, 0xc4, 0xab, 0xdd, 0x4d, 0x94, 0xc4, 0x3f, 0xd8, 0x4d, 0x3c, 0x18, 0x0f, 0x0e, - 0xe5, 0xb5, 0x8c, 0xd0, 0xce, 0x64, 0x66, 0xa8, 0xf1, 0xe6, 0x47, 0xf0, 0x2b, 0xe9, 0x89, 0xe3, - 0x1e, 0xf7, 0x44, 0xa4, 0x7e, 0x01, 0x3f, 0x82, 0x99, 0xa1, 0xbb, 0x94, 0x45, 0x57, 0xbd, 0x75, - 0xde, 0xf9, 0x3d, 0xcf, 0xfb, 0x3e, 0x7d, 0x5b, 0xf7, 0xc9, 0xec, 0x91, 0xc2, 0x8c, 0x93, 0xd9, - 0x62, 0x0c, 0x32, 0x01, 0x0d, 0x8a, 0xa4, 0x90, 0x4c, 0xb8, 0x24, 0xf9, 0x05, 0x15, 0x8c, 0x24, - 0xa0, 0x3f, 0x72, 0x39, 0x63, 0x49, 0x44, 0xd2, 0x1e, 0x9d, 0x8b, 0x29, 0xed, 0x91, 0x08, 0x12, - 0x90, 0x54, 0xc3, 0x04, 0x0b, 0xc9, 0x35, 0xf7, 0xd0, 0x86, 0xc7, 0x54, 0x30, 0xbc, 0xe5, 0xf1, - 0x15, 0xdf, 0x7c, 0x10, 0x31, 0x3d, 0x5d, 0x8c, 0x71, 0xc8, 0x63, 0x12, 0xf1, 0x88, 0x13, 0x2b, - 0x1b, 0x2f, 0xde, 0xdb, 0x93, 0x3d, 0xd8, 0xa7, 0x8d, 0x5d, 0xb3, 0x53, 0x68, 0x1f, 0x72, 0x09, - 0x24, 0xdd, 0x6b, 0xd9, 0xec, 0x6f, 0x99, 0x98, 0x86, 0x53, 0x96, 0x80, 0xfc, 0x44, 0xc4, 0x2c, - 0x32, 0x05, 0x45, 0x62, 0xd0, 0xf4, 0x77, 0x2a, 0xf2, 0x27, 0x95, 0x5c, 0x24, 0x9a, 0xc5, 0xb0, - 0x27, 0x18, 0xfc, 0x4d, 0xa0, 0xc2, 0x29, 0xc4, 0xf4, 0xa6, 0xae, 0xf3, 0xcd, 0x71, 0xab, 0xa7, - 0xf3, 0x85, 0xd2, 0x20, 0x4f, 0x87, 0x67, 0x81, 0xf7, 0xce, 0x3d, 0x32, 0x33, 0x4d, 0xa8, 0xa6, - 0x0d, 0xa7, 0xed, 0x74, 0xab, 0x27, 0x0f, 0xf1, 0xf6, 0xa5, 0x5d, 0x5b, 0x63, 0x31, 0x8b, 0x4c, - 0x41, 0x61, 0x43, 0xe3, 0xb4, 0x87, 0x5f, 0x8e, 0x3f, 0x40, 0xa8, 0x9f, 0x83, 0xa6, 0xbe, 0xb7, - 0x5c, 0xb5, 0x4a, 0xd9, 0xaa, 0xe5, 0x6e, 0x6b, 0xc1, 0xb5, 0xab, 0xf7, 0xca, 0x2d, 0x2b, 0x01, - 0x61, 0xe3, 0xc0, 0xba, 0x13, 0x7c, 0xfb, 0x4a, 0x70, 0x61, 0xb8, 0x73, 0x01, 0xa1, 0x5f, 0xcb, - 0xcd, 0xcb, 0xe6, 0x14, 0x58, 0xab, 
0xce, 0x57, 0xc7, 0xad, 0x17, 0xb8, 0x67, 0x4c, 0x69, 0xef, - 0xed, 0x5e, 0x10, 0xfc, 0x6f, 0x41, 0x8c, 0xda, 0xc6, 0x38, 0xce, 0x3b, 0x1d, 0x5d, 0x55, 0x0a, - 0x21, 0x46, 0x6e, 0x85, 0x69, 0x88, 0x55, 0xe3, 0xa0, 0x7d, 0xd8, 0xad, 0x9e, 0xdc, 0xff, 0x8f, - 0x14, 0xfe, 0xdd, 0xdc, 0xb7, 0x32, 0x34, 0x0e, 0xc1, 0xc6, 0xa8, 0xf3, 0x73, 0x37, 0x83, 0x49, - 0xe7, 0xbd, 0x76, 0x6b, 0x09, 0x9f, 0xc0, 0x39, 0xcc, 0x21, 0xd4, 0x5c, 0xe6, 0x39, 0xda, 0xc5, - 0x66, 0xe6, 0xb3, 0x33, 0x53, 0xbf, 0x28, 0x70, 0xfe, 0x71, 0xb6, 0x6a, 0xd5, 0x8a, 0x95, 0x60, - 0xc7, 0xc7, 0x7b, 0xec, 0xd6, 0x05, 0x48, 0x03, 0x3c, 0xe5, 0x4a, 0xfb, 0x4c, 0x2b, 0xbb, 0x8d, - 0x8a, 0x7f, 0x2f, 0x1f, 0xad, 0x3e, 0xda, 0xbd, 0x0e, 0x6e, 0xf2, 0x5e, 0xdb, 0x2d, 0x33, 0x91, - 0xf6, 0x1b, 0x87, 0x6d, 0xa7, 0x7b, 0x67, 0xbb, 0x94, 0xe1, 0x28, 0xed, 0x07, 0xf6, 0x26, 0x27, - 0x06, 0x8d, 0xf2, 0x1e, 0x31, 0xb0, 0xc4, 0xc0, 0x3f, 0x5b, 0xae, 0x51, 0xe9, 0x62, 0x8d, 0x4a, - 0x97, 0x6b, 0x54, 0xfa, 0x9c, 0x21, 0x67, 0x99, 0x21, 0xe7, 0x22, 0x43, 0xce, 0x65, 0x86, 0x9c, - 0xef, 0x19, 0x72, 0xbe, 0xfc, 0x40, 0xa5, 0x37, 0xe8, 0xf6, 0x7f, 0xfc, 0x57, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xdf, 0x1d, 0xe9, 0x86, 0x1d, 0x04, 0x00, 0x00, + // 698 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x4e, 0xdb, 0x4a, + 0x14, 0xc6, 0x63, 0x92, 0x48, 0x78, 0x00, 0x85, 0xeb, 0xcd, 0x8d, 0x58, 0x38, 0xb9, 0xb9, 0x1b, + 0xae, 0x6e, 0x19, 0x03, 0x42, 0x51, 0xb7, 0x98, 0x48, 0x34, 0x52, 0x0b, 0xe9, 0x20, 0xba, 0xa8, + 0x58, 0xd4, 0xb1, 0x0f, 0x8e, 0x1b, 0xfc, 0x47, 0x33, 0xe3, 0x54, 0xec, 0xfa, 0x08, 0x7d, 0xa1, + 0x56, 0x6a, 0x57, 0x2c, 0x59, 0xb2, 0x8a, 0x8a, 0xfb, 0x02, 0x5d, 0xb7, 0x9b, 0x6a, 0x26, 0x4e, + 0xec, 0x24, 0x0d, 0xd0, 0x0d, 0xbb, 0xcc, 0x39, 0xbf, 0xf3, 0xcd, 0x39, 0x73, 0xbe, 0x24, 0xe8, + 0xb0, 0xff, 0x94, 0x61, 0x2f, 0x34, 0xfa, 0x71, 0x17, 0x68, 0x00, 0x1c, 0x98, 0x31, 0x80, 0xc0, + 0x09, 0xa9, 0x91, 0x26, 0xac, 0xc8, 0x33, 0x02, 0xe0, 0xef, 0x42, 0xda, 0xf7, 0x02, 0xd7, 0x18, + 0xec, 0x58, 0x17, 0x51, 0xcf, 0xda, 0x31, 0x5c, 0x08, 0x80, 0x5a, 0x1c, 0x1c, 0x1c, 0xd1, 0x90, + 0x87, 0x9a, 0x3e, 0xe2, 0xb1, 0x15, 0x79, 0x38, 0xe3, 0xf1, 0x98, 0xdf, 0xd8, 0x72, 0x3d, 0xde, + 0x8b, 0xbb, 0xd8, 0x0e, 0x7d, 0xc3, 0x0d, 0xdd, 0xd0, 0x90, 0x65, 0xdd, 0xf8, 0x5c, 0x9e, 0xe4, + 0x41, 0x7e, 0x1a, 0xc9, 0x6d, 0x34, 0x72, 0xd7, 0xdb, 0x21, 0x05, 0x63, 0x30, 0x77, 0xe5, 0xc6, + 0x5e, 0xc6, 0xf8, 0x96, 0xdd, 0xf3, 0x02, 0xa0, 0x97, 0x46, 0xd4, 0x77, 0x45, 0x80, 0x19, 0x3e, + 0x70, 0xeb, 0x77, 0x55, 0xc6, 0xa2, 0x2a, 0x1a, 0x07, 0xdc, 0xf3, 0x61, 0xae, 0xa0, 0x79, 0x5f, + 0x01, 0xb3, 0x7b, 0xe0, 0x5b, 0xb3, 0x75, 0x8d, 0x2f, 0x0a, 0x5a, 0x39, 0xb8, 0x88, 0x19, 0x07, + 0x7a, 0xd0, 0x6e, 0x11, 0xed, 0x0d, 0x5a, 0x16, 0x3d, 0x39, 0x16, 0xb7, 0xaa, 0x4a, 0x5d, 0xd9, + 0x5c, 0xd9, 0xdd, 0xc6, 0xd9, 0xa3, 0x4d, 0xa4, 0x71, 0xd4, 0x77, 0x45, 0x80, 0x61, 0x41, 0xe3, + 0xc1, 0x0e, 0x3e, 0xee, 0xbe, 0x05, 0x9b, 0xbf, 0x00, 0x6e, 0x99, 0xda, 0xd5, 0xb0, 0x56, 0x48, + 0x86, 0x35, 0x94, 0xc5, 0xc8, 0x44, 0x55, 0x7b, 0x89, 0x4a, 0x2c, 0x02, 0xbb, 0xba, 0x24, 0xd5, + 0x0d, 0x7c, 0xf7, 0x4a, 0x70, 0xae, 0xb9, 0x93, 0x08, 0x6c, 0x73, 0x35, 0x15, 0x2f, 0x89, 0x13, + 0x91, 0x52, 0x8d, 0xcf, 0x0a, 0xaa, 0xe4, 0xb8, 0xe7, 0x1e, 0xe3, 0xda, 0xd9, 0xdc, 0x20, 0xf8, + 0x61, 0x83, 0x88, 0x6a, 0x39, 0xc6, 0x7a, 0x7a, 0xd3, 0xf2, 0x38, 0x92, 0x1b, 0xa2, 0x83, 0xca, + 0x1e, 0x07, 0x9f, 0x55, 0x97, 0xea, 0xc5, 0xcd, 0x95, 0xdd, 0xff, 0xff, 0x60, 0x0a, 0x73, 0x2d, + 0xd5, 0x2d, 0xb7, 0x85, 0x02, 0x19, 0x09, 0x35, 
0xbe, 0x4f, 0xcf, 0x20, 0xa6, 0xd3, 0x5e, 0xa1, + 0xd5, 0x20, 0x74, 0xe0, 0x04, 0x2e, 0xc0, 0xe6, 0x21, 0x4d, 0xe7, 0xa8, 0xe7, 0x2f, 0x13, 0xb6, + 0x13, 0x5d, 0x1f, 0xe5, 0x38, 0x73, 0x3d, 0x19, 0xd6, 0x56, 0xf3, 0x11, 0x32, 0xa5, 0xa3, 0xed, + 0xa3, 0x4a, 0x04, 0x54, 0x00, 0xcf, 0x42, 0xc6, 0x4d, 0x8f, 0x33, 0xb9, 0x8d, 0xb2, 0xf9, 0x77, + 0xda, 0x5a, 0xa5, 0x33, 0x9d, 0x26, 0xb3, 0xbc, 0x56, 0x47, 0x25, 0x2f, 0x1a, 0xec, 0x55, 0x8b, + 0x75, 0x65, 0x53, 0xcd, 0x96, 0xd2, 0xee, 0x0c, 0xf6, 0x88, 0xcc, 0xa4, 0x44, 0xb3, 0x5a, 0x9a, + 0x23, 0x9a, 0x92, 0x68, 0x36, 0x3e, 0x29, 0x48, 0x6d, 0x77, 0xf6, 0x1d, 0x87, 0x02, 0x63, 0x8f, + 0xe0, 0xbc, 0xe3, 0x29, 0xe7, 0x6d, 0xdd, 0xb7, 0xb3, 0x49, 0x6b, 0x0b, 0x7d, 0xf7, 0x51, 0x41, + 0x6b, 0x13, 0xea, 0x11, 0x5c, 0x77, 0x34, 0xed, 0xba, 0xff, 0x1e, 0x3c, 0xc1, 0x02, 0xcf, 0xf9, + 0xb9, 0xf6, 0xa5, 0xe1, 0xce, 0x90, 0x1a, 0x59, 0x14, 0x02, 0x4e, 0xe0, 0x3c, 0xed, 0xff, 0xde, + 0x2f, 0x68, 0x67, 0x5c, 0x00, 0x14, 0x02, 0x1b, 0xcc, 0xb5, 0x64, 0x58, 0x53, 0x27, 0x41, 0x92, + 0x09, 0x36, 0x7e, 0x2a, 0xa8, 0x32, 0x43, 0x6b, 0xff, 0xa2, 0xb2, 0x4b, 0xc3, 0x38, 0x92, 0xb7, + 0xa9, 0x59, 0x9f, 0x87, 0x22, 0x48, 0x46, 0x39, 0xed, 0x09, 0x5a, 0xa6, 0xc0, 0xc2, 0x98, 0xda, + 0x20, 0x97, 0xa7, 0x66, 0xaf, 0x44, 0xd2, 0x38, 0x99, 0x10, 0x9a, 0x81, 0xd4, 0xc0, 0xf2, 0x81, + 0x45, 0x96, 0x0d, 0xa9, 0x3f, 0xff, 0x4a, 0x71, 0xf5, 0x68, 0x9c, 0x20, 0x19, 0x23, 0x9c, 0x2a, + 0x0e, 0xb3, 0x4e, 0x15, 0x2c, 0x91, 0x19, 0xcd, 0x44, 0xc5, 0xd8, 0x73, 0xaa, 0x65, 0x09, 0x6c, + 0xa7, 0x40, 0xf1, 0xb4, 0xdd, 0xfa, 0x31, 0xac, 0xfd, 0xb3, 0xe8, 0x97, 0x97, 0x5f, 0x46, 0xc0, + 0xf0, 0x69, 0xbb, 0x45, 0x44, 0xb1, 0xd9, 0xba, 0xba, 0xd5, 0x0b, 0xd7, 0xb7, 0x7a, 0xe1, 0xe6, + 0x56, 0x2f, 0xbc, 0x4f, 0x74, 0xe5, 0x2a, 0xd1, 0x95, 0xeb, 0x44, 0x57, 0x6e, 0x12, 0x5d, 0xf9, + 0x9a, 0xe8, 0xca, 0x87, 0x6f, 0x7a, 0xe1, 0xb5, 0x7e, 0xf7, 0x3f, 0xda, 0xaf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xf9, 0x9d, 0x9e, 0xc6, 0x0b, 0x07, 0x00, 0x00, } func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { @@ -312,6 +442,179 @@ func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *IPAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := 
len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParentRef != nil { + { + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ParentReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -350,82 +653,597 @@ func (m *ClusterCIDRList) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - return n -} - -func (m *ClusterCIDRSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NodeSelector != nil { - l = m.NodeSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) - l = len(m.IPv4) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IPv6) - n += 1 + l + sovGenerated(uint64(l)) - return n -} + return n +} + +func (m *ClusterCIDRSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) + l = len(m.IPv4) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IPv6) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func (m *IPAddressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterCIDR{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterCIDRSpec{`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, + `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, + `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IPAddress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IPAddressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + 
return s +} +func (this *IPAddressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParentReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := 
m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) + } + m.PerNodeHostBits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PerNodeHostBits |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPv4 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPv6 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ClusterCIDR) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCIDR{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCIDRList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ClusterCIDR{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ClusterCIDRList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCIDRSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCIDRSpec{`, - `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), 
"NodeSelector", "v11.NodeSelector", 1) + `,`, - `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, - `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, - `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { +func (m *IPAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -448,10 +1266,10 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -541,7 +1359,7 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { +func (m *IPAddressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -564,10 +1382,10 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -632,7 +1450,7 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ClusterCIDR{}) + m.Items = append(m.Items, IPAddress{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -658,7 +1476,7 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -681,15 +1499,15 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -716,18 +1534,100 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeSelector == nil { - m.NodeSelector = &v11.NodeSelector{} + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} } 
- if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParentReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - m.PerNodeHostBits = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -737,14 +1637,27 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PerNodeHostBits |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -772,11 +1685,11 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IPv4 = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -804,7 +1717,39 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA 
[]byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IPv6 = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto index bbda585b..0f1f30d7 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -44,7 +44,7 @@ message ClusterCIDR { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the ClusterCIDR. + // spec is the desired state of the ClusterCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ClusterCIDRSpec spec = 2; @@ -57,19 +57,19 @@ message ClusterCIDRList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of ClusterCIDRs. + // items is the list of ClusterCIDRs. repeated ClusterCIDR items = 2; } // ClusterCIDRSpec defines the desired state of ClusterCIDR. message ClusterCIDRSpec { - // NodeSelector defines which nodes the config is applicable to. - // An empty or nil NodeSelector selects all nodes. + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. // This field is immutable. // +optional optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; - // PerNodeHostBits defines the number of host bits to be configured per node. + // perNodeHostBits defines the number of host bits to be configured per node. // A subnet mask determines how much of the address is used for network bits // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the // address into 24 bits for the network portion and 8 bits for the host portion. @@ -79,16 +79,77 @@ message ClusterCIDRSpec { // +required optional int32 perNodeHostBits = 2; - // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of IPv4 and IPv6 must be specified. + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional optional string ipv4 = 3; - // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of IPv4 and IPv6 must be specified. + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional optional string ipv6 = 4; } +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. 
The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +message IPAddress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IPAddressSpec spec = 2; +} + +// IPAddressList contains a list of IPAddress. +message IPAddressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of IPAddresses. + repeated IPAddress items = 2; +} + +// IPAddressSpec describe the attributes in an IP Address. +message IPAddressSpec { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + optional ParentReference parentRef = 1; +} + +// ParentReference describes a reference to a parent object. +message ParentReference { + // Group is the group of the object being referenced. + // +optional + optional string group = 1; + + // Resource is the resource of the object being referenced. + // +required + optional string resource = 2; + + // Namespace is the namespace of the object being referenced. + // +optional + optional string namespace = 3; + + // Name is the name of the object being referenced. + // +required + optional string name = 4; + + // UID is the uid of the object being referenced. + // +optional + optional string uid = 5; +} + diff --git a/vendor/k8s.io/api/networking/v1alpha1/register.go b/vendor/k8s.io/api/networking/v1alpha1/register.go index 12c0cf7b..8dda6394 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/register.go +++ b/vendor/k8s.io/api/networking/v1alpha1/register.go @@ -22,12 +22,17 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package. +// GroupName is the group name used in this package. const GroupName = "networking.k8s.io" -// SchemeGroupVersion is group version used to register these objects. +// SchemeGroupVersion is group version used to register objects in this package. var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + // Resource takes an unqualified resource and returns a Group qualified GroupResource. func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() @@ -49,8 +54,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &ClusterCIDR{}, &ClusterCIDRList{}, + &IPAddress{}, + &IPAddressList{}, ) - // Add the watch version that applies. 
metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go index 734e9bf8..52e4a11e 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -19,6 +19,7 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) // +genclient @@ -37,12 +38,13 @@ import ( // selector matches the Node may be used. type ClusterCIDR struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the ClusterCIDR. + // spec is the desired state of the ClusterCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -50,13 +52,13 @@ type ClusterCIDR struct { // ClusterCIDRSpec defines the desired state of ClusterCIDR. type ClusterCIDRSpec struct { - // NodeSelector defines which nodes the config is applicable to. - // An empty or nil NodeSelector selects all nodes. + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. // This field is immutable. // +optional NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` - // PerNodeHostBits defines the number of host bits to be configured per node. + // perNodeHostBits defines the number of host bits to be configured per node. // A subnet mask determines how much of the address is used for network bits // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the // address into 24 bits for the network portion and 8 bits for the host portion. @@ -66,14 +68,14 @@ type ClusterCIDRSpec struct { // +required PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` - // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of IPv4 and IPv6 must be specified. + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` - // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of IPv4 and IPv6 must be specified. + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` @@ -85,11 +87,77 @@ type ClusterCIDRSpec struct { // ClusterCIDRList contains a list of ClusterCIDR. type ClusterCIDRList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of ClusterCIDRs. + // items is the list of ClusterCIDRs. 
Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +type IPAddress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPAddressSpec describe the attributes in an IP Address. +type IPAddressSpec struct { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` +} + +// ParentReference describes a reference to a parent object. +type ParentReference struct { + // Group is the group of the object being referenced. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resource is the resource of the object being referenced. + // +required + Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` + // Namespace is the namespace of the object being referenced. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + // Name is the name of the object being referenced. + // +required + Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` + // UID is the uid of the object being referenced. + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// IPAddressList contains a list of IPAddress. +type IPAddressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of IPAddresses. 
+ Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index e0d4a478..85304784 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ClusterCIDR = map[string]string{ "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ClusterCIDR) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (ClusterCIDR) SwaggerDoc() map[string]string { var map_ClusterCIDRList = map[string]string{ "": "ClusterCIDRList contains a list of ClusterCIDR.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of ClusterCIDRs.", + "items": "items is the list of ClusterCIDRs.", } func (ClusterCIDRList) SwaggerDoc() map[string]string { @@ -49,14 +49,56 @@ func (ClusterCIDRList) SwaggerDoc() map[string]string { var map_ClusterCIDRSpec = map[string]string{ "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "nodeSelector": "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable.", - "perNodeHostBits": "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "ipv4": "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", - "ipv6": "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of IPv4 and IPv6 must be specified. 
This field is immutable.", + "nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", + "perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", + "ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", + "ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", } func (ClusterCIDRSpec) SwaggerDoc() map[string]string { return map_ClusterCIDRSpec } +var map_IPAddress = map[string]string{ + "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IPAddress) SwaggerDoc() map[string]string { + return map_IPAddress +} + +var map_IPAddressList = map[string]string{ + "": "IPAddressList contains a list of IPAddress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of IPAddresses.", +} + +func (IPAddressList) SwaggerDoc() map[string]string { + return map_IPAddressList +} + +var map_IPAddressSpec = map[string]string{ + "": "IPAddressSpec describe the attributes in an IP Address.", + "parentRef": "ParentRef references the resource that an IPAddress is attached to. 
An IPAddress must reference a parent object.", +} + +func (IPAddressSpec) SwaggerDoc() map[string]string { + return map_IPAddressSpec +} + +var map_ParentReference = map[string]string{ + "": "ParentReference describes a reference to a parent object.", + "group": "Group is the group of the object being referenced.", + "resource": "Resource is the resource of the object being referenced.", + "namespace": "Namespace is the namespace of the object being referenced.", + "name": "Name is the name of the object being referenced.", + "uid": "UID is the uid of the object being referenced.", +} + +func (ParentReference) SwaggerDoc() map[string]string { + return map_ParentReference +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go new file mode 100644 index 00000000..5f9c23f7 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + + // TODO: Use IPFamily as field with a field selector,And the value is set based on + // the name at create time and immutable. + // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. + // This label simplify dual-stack client operations allowing to obtain the list of + // IP addresses filtered by family. + LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" + // LabelManagedBy is used to indicate the controller or entity that manages + // an IPAddress. This label aims to enable different IPAddress + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // IPAddress objects. + LabelManagedBy = "ipaddress.kubernetes.io/managed-by" +) diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go index e549f316..97db2eac 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -106,3 +106,100 @@ func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddress) DeepCopyInto(out *IPAddress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { + if in == nil { + return nil + } + out := new(IPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IPAddress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { + if in == nil { + return nil + } + out := new(IPAddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { + *out = *in + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { + if in == nil { + return nil + } + out := new(IPAddressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go index dd6e3b26..60438ba5 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -56,3 +56,39 @@ func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) { func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) { return 1, 31 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index 78ecf9fa..46bb7f66 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -33,14 +33,14 @@ option go_package = "k8s.io/api/networking/v1beta1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -57,7 +57,7 @@ message HTTPIngressPath { // Defaults to ImplementationSpecific. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -68,7 +68,7 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. 
repeated HTTPIngressPath paths = 1; } @@ -82,12 +82,12 @@ message Ingress { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -95,15 +95,15 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional optional string serviceName = 1; - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -121,7 +121,7 @@ message IngressClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -133,30 +133,30 @@ message IngressClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -165,15 +165,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. 
message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -187,21 +187,21 @@ message IngressList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. message IngressLoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional optional string ip = 1; - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional optional string hostname = 2; - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional repeated IngressPortStatus ports = 4; @@ -209,21 +209,21 @@ message IngressLoadBalancerIngress { // LoadBalancerStatus represents the status of a load-balancer. message IngressLoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional repeated IngressLoadBalancerIngress ingress = 1; } // IngressPortStatus represents the error condition of a service port message IngressPortStatus { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. optional int32 port = 1; - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" optional string protocol = 2; - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -242,7 +242,7 @@ message IngressPortStatus { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -255,7 +255,7 @@ message IngressRule { // IngressRuleValue. 
If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -287,7 +287,7 @@ message IngressRuleValue { // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -300,44 +300,44 @@ message IngressSpec { // +optional optional string ingressClassName = 4; - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional optional IngressBackend backend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional repeated IngressRule rules = 3; } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional optional IngressLoadBalancerStatus loadBalancer = 1; } // IngressTLS describes the transport layer security associated with an Ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. 
If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 49c82123..87cc9165 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -34,17 +34,18 @@ import ( // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -58,18 +59,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -82,22 +84,22 @@ type IngressSpec struct { // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. 
// +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` @@ -106,13 +108,14 @@ type IngressSpec struct { // IngressTLS describes the transport layer security associated with an Ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination @@ -122,31 +125,31 @@ type IngressTLS struct { // TODO: Consider specifying different modes of termination, protocols etc. } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` } // LoadBalancerStatus represents the status of a load-balancer. type IngressLoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. type IngressLoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` @@ -154,14 +157,14 @@ type IngressLoadBalancerIngress struct { // IngressPortStatus represents the error condition of a service port type IngressPortStatus struct { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. 
// The supported values are: "TCP", "UDP", "SCTP" Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -180,7 +183,7 @@ type IngressPortStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -193,7 +196,7 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -204,6 +207,7 @@ type IngressRule struct { // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. // If unspecified, the rule defaults to a http catch-all. Whether that sends // just traffic matching the host to the default backend or all traffic to the @@ -234,7 +238,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. @@ -273,14 +277,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -297,22 +301,22 @@ type HTTPIngressPath struct { // Defaults to ImplementationSpecific. 
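The precise-versus-wildcard host semantics described in the IngressRule comments above (a '*' wildcard must be the first DNS label and matches exactly one label) can be illustrated with a small, hypothetical helper. This is only a sketch of the documented behavior as read here, not the matching code of any real ingress controller.

package main

import (
	"fmt"
	"strings"
)

// hostMatches illustrates the rule-host semantics: a precise host must equal
// the request's Host header, while a wildcard host ("*.foo.com") matches a
// request host that adds exactly one extra leading DNS label.
func hostMatches(ruleHost, requestHost string) bool {
	if !strings.HasPrefix(ruleHost, "*.") {
		return ruleHost == requestHost // precise match
	}
	suffix := strings.TrimPrefix(ruleHost, "*") // e.g. ".foo.com"
	if !strings.HasSuffix(requestHost, suffix) {
		return false
	}
	// The wildcard must cover exactly one label: no empty label, no dots.
	first := strings.TrimSuffix(requestHost, suffix)
	return first != "" && !strings.Contains(first, ".")
}

func main() {
	fmt.Println(hostMatches("*.foo.com", "bar.foo.com"))   // true
	fmt.Println(hostMatches("*.foo.com", "foo.com"))       // false: no extra label
	fmt.Println(hostMatches("*.foo.com", "a.b.foo.com"))   // false: two extra labels
	fmt.Println(hostMatches("foo.bar.com", "foo.bar.com")) // true: precise host
}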
PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -333,12 +337,13 @@ type IngressBackend struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -346,15 +351,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -373,19 +378,23 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. 
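To show how the IngressClassSpec and IngressClassParametersReference fields above fit together, here is a minimal, hypothetical sketch. The controller path, parameter kind, names, API group, and namespace are invented; the scope/namespace pairing follows the documented rule that namespace is required only when scope is "Namespace".

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	apiGroup := "example.acme.io" // hypothetical CRD group
	scope := "Namespace"
	namespace := "ingress-config"

	ic := networkingv1beta1.IngressClass{
		ObjectMeta: metav1.ObjectMeta{Name: "acme-lb"},
		Spec: networkingv1beta1.IngressClassSpec{
			// controller: a domain-prefixed path identifying the implementing controller.
			Controller: "acme.io/ingress-controller",
			// parameters: optional reference to controller-specific configuration.
			Parameters: &networkingv1beta1.IngressClassParametersReference{
				APIGroup:  &apiGroup,
				Kind:      "IngressParameters",
				Name:      "acme-settings",
				Scope:     &scope,     // "Cluster" (default) or "Namespace"
				Namespace: &namespace, // required when Scope is "Namespace"
			},
		},
	}
	fmt.Println(ic.Spec.Controller)
}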
// +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -404,6 +413,6 @@ type IngressClassList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 195d535c..b2373669 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. 
Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -50,8 +50,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -60,9 +60,9 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "serviceName": "Specifies the name of the referenced service.", - "servicePort": "Specifies the port of the referenced service.", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", + "serviceName": "serviceName specifies the name of the referenced service.", + "servicePort": "servicePort Specifies the port of the referenced service.", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -72,7 +72,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -82,7 +82,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -91,11 +91,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. 
If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -104,8 +104,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -115,7 +115,7 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { @@ -124,9 +124,9 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressLoadBalancerIngress = map[string]string{ "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", - "ip": "IP is set for load-balancer ingress points that are IP based.", - "hostname": "Hostname is set for load-balancer ingress points that are DNS based.", - "ports": "Ports provides information about the ports exposed by this LoadBalancer.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", } func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { @@ -135,7 +135,7 @@ func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { var map_IngressLoadBalancerStatus = map[string]string{ "": "LoadBalancerStatus represents the status of a load-balancer.", - "ingress": "Ingress is a list containing ingress points for the load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", } func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { @@ -144,9 +144,9 @@ func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { var map_IngressPortStatus = map[string]string{ "": "IngressPortStatus represents the error condition of a service port", - "port": "Port is the port number of the ingress port.", - "protocol": "Protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", - "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", } func (IngressPortStatus) SwaggerDoc() map[string]string { @@ -155,7 +155,7 @@ func (IngressPortStatus) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. 
The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -172,10 +172,10 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. 
This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", + "backend": "backend is the default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -183,8 +183,8 @@ func (IngressSpec) SwaggerDoc() map[string]string { } var map_IngressStatus = map[string]string{ - "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "": "IngressStatus describes the current state of the Ingress.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -193,8 +193,8 @@ func (IngressStatus) SwaggerDoc() map[string]string { var map_IngressTLS = map[string]string{ "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. 
Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto index 294be85b..0152d5e3 100644 --- a/vendor/k8s.io/api/node/v1/generated.proto +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -61,13 +61,13 @@ message RuntimeClass { // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,7 +82,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go index 984696d9..b00f5877 100644 --- a/vendor/k8s.io/api/node/v1/types.go +++ b/vendor/k8s.io/api/node/v1/types.go @@ -34,11 +34,12 @@ import ( // https://kubernetes.io/docs/concepts/containers/runtime-class/ type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. 
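A short, hypothetical sketch of a node/v1 RuntimeClass using the fields documented above (handler plus overhead.podFixed). The class name, handler value, and resource quantities are invented for illustration; only the types and field names come from the vendored API.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1 "k8s.io/api/node/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	rc := nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gvisor"}, // hypothetical class name
		// handler: lowercase, DNS-label conformant, immutable.
		Handler: "runsc",
		// overhead: fixed per-pod resources charged in addition to the containers.
		Overhead: &nodev1.Overhead{
			PodFixed: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("64Mi"),
			},
		},
	}
	fmt.Println(rc.Handler, rc.Overhead.PodFixed.Cpu().String())
}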
It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -50,13 +51,13 @@ type RuntimeClass struct { // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -66,7 +67,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -96,11 +97,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go index a9eddc60..f5e6b327 100644 --- a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
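The comments above note that the kubelet resolves a pod's RuntimeClassName reference before running it. A minimal, hypothetical sketch of a Pod that opts into such a class looks like this; the pod name, class name, and image are invented for the example.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	rcName := "gvisor" // must match the name of an existing RuntimeClass object
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "sandboxed", Namespace: "default"},
		Spec: corev1.PodSpec{
			// runtimeClassName is resolved by the kubelet to the class's handler.
			RuntimeClassName: &rcName,
			Containers: []corev1.Container{{
				Name:  "app",
				Image: "registry.example/app:latest", // hypothetical image
			}},
		},
	}
	fmt.Println(*pod.Spec.RuntimeClassName)
}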
For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index d46e0ec6..4673e926 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1alpha1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. 
// +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status optional RuntimeClassSpec spec = 2; } @@ -61,7 +61,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } @@ -70,7 +70,7 @@ message RuntimeClassList { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. message RuntimeClassSpec { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -78,17 +78,17 @@ message RuntimeClassSpec { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. optional string runtimeHandler = 1; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional optional Overhead overhead = 2; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index 588c8e4c..bf9e284b 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -34,11 +34,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` } @@ -48,7 +49,7 @@ type RuntimeClass struct { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. 
type RuntimeClassSpec struct { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -56,17 +57,17 @@ type RuntimeClassSpec struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -76,7 +77,7 @@ type RuntimeClassSpec struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -106,11 +107,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index 96413754..ccc1b708 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
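Unlike node/v1 and node/v1beta1, where handler sits directly on RuntimeClass, the v1alpha1 API nests runtimeHandler (with overhead and scheduling) under spec, as the hunks above show. A minimal, hypothetical sketch of the v1alpha1 shape, with the class name and handler value invented:

package main

import (
	"fmt"

	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	rc := nodev1alpha1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "kata"}, // hypothetical class name
		Spec: nodev1alpha1.RuntimeClassSpec{
			// runtimeHandler: lowercase, DNS-label conformant, immutable.
			RuntimeHandler: "kata",
		},
	}
	fmt.Println(rc.Spec.RuntimeHandler)
}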
var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -49,7 +49,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { @@ -58,9 +58,9 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. 
If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "runtimeHandler": "runtimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClassSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 8ffad697..54dbc099 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1beta1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -57,17 +57,17 @@ message RuntimeClass { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,7 +82,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. 
+ // items is a list of schema objects. repeated RuntimeClass items = 2; } diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index b924cb42..74ecca26 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -36,11 +36,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -48,17 +49,17 @@ type RuntimeClass struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -68,7 +69,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -100,11 +101,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. 
Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index fec4398b..086105ec 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto index 0a1e010b..a79e7102 100644 --- a/vendor/k8s.io/api/policy/v1/generated.proto +++ b/vendor/k8s.io/api/policy/v1/generated.proto @@ -116,8 +116,8 @@ message PodDisruptionBudgetSpec { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional optional string unhealthyPodEvictionPolicy = 4; } diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go index 6aec30b8..45b9550f 100644 --- a/vendor/k8s.io/api/policy/v1/types.go +++ b/vendor/k8s.io/api/policy/v1/types.go @@ -71,8 +71,8 @@ type PodDisruptionBudgetSpec struct { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } diff --git a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go index 582b28c1..799b0794 100644 --- a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Eviction = map[string]string{ @@ -63,7 +63,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{ "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. 
So for example you can prevent all voluntary evictions by specifying \"100%\".", "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", - "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is alpha-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default).", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. 
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 989b4845..16301c23 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -177,8 +177,8 @@ message PodDisruptionBudgetSpec { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional optional string unhealthyPodEvictionPolicy = 4; } diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 863b2b87..1e6b075e 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -69,8 +69,8 @@ type PodDisruptionBudgetSpec struct { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index cebba07f..266a9a85 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AllowedCSIDriver = map[string]string{ @@ -121,7 +121,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{ "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector selects no pods. An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. In policy/v1, an empty selector will select all pods in the namespace.", "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. 
This is a mutually exclusive setting with \"minAvailable\".", - "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is alpha-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default).", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 63aa4ed7..37039819 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index 08578aba..6708f3e5 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index db952583..fff1fe40 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/vendor/k8s.io/api/resource/v1alpha1/doc.go b/vendor/k8s.io/api/resource/v1alpha2/doc.go similarity index 84% rename from vendor/k8s.io/api/resource/v1alpha1/doc.go rename to vendor/k8s.io/api/resource/v1alpha2/doc.go index 8fa577fa..d9c20e08 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/doc.go +++ b/vendor/k8s.io/api/resource/v1alpha2/doc.go @@ -20,5 +20,5 @@ limitations under the License. // +groupName=resource.k8s.io -// Package v1alpha1 is the v1alpha1 version of the resource API. -package v1alpha1 // import "k8s.io/api/resource/v1alpha1" +// Package v1alpha2 is the v1alpha2 version of the resource API. +package v1alpha2 // import "k8s.io/api/resource/v1alpha2" diff --git a/vendor/k8s.io/api/resource/v1alpha1/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go similarity index 83% rename from vendor/k8s.io/api/resource/v1alpha1/generated.pb.go rename to vendor/k8s.io/api/resource/v1alpha2/generated.pb.go index 632ad042..2e8f9c72 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go @@ -15,9 +15,9 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto -package v1alpha1 +package v1alpha2 import ( fmt "fmt" @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AllocationResult) Reset() { *m = AllocationResult{} } func (*AllocationResult) ProtoMessage() {} func (*AllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{0} + return fileDescriptor_3add37bbd52889e0, []int{0} } func (m *AllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,15 +74,15 @@ func (m *AllocationResult) XXX_DiscardUnknown() { var xxx_messageInfo_AllocationResult proto.InternalMessageInfo -func (m *PodScheduling) Reset() { *m = PodScheduling{} } -func (*PodScheduling) ProtoMessage() {} -func (*PodScheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{1} +func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } +func (*PodSchedulingContext) ProtoMessage() {} +func (*PodSchedulingContext) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{1} } -func (m *PodScheduling) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -90,27 +90,27 @@ func (m *PodScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *PodScheduling) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodScheduling.Merge(m, src) +func (m *PodSchedulingContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContext.Merge(m, src) } -func (m *PodScheduling) XXX_Size() int { +func (m *PodSchedulingContext) XXX_Size() int { return m.Size() } -func (m *PodScheduling) XXX_DiscardUnknown() { - xxx_messageInfo_PodScheduling.DiscardUnknown(m) +func (m *PodSchedulingContext) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m) } -var xxx_messageInfo_PodScheduling proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo -func (m *PodSchedulingList) Reset() { *m = PodSchedulingList{} } -func (*PodSchedulingList) ProtoMessage() {} -func (*PodSchedulingList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{2} +func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } +func (*PodSchedulingContextList) ProtoMessage() {} +func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{2} } -func (m *PodSchedulingList) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -118,27 +118,27 @@ func (m *PodSchedulingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, e } return b[:n], nil } -func (m *PodSchedulingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingList.Merge(m, src) +func (m 
*PodSchedulingContextList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextList.Merge(m, src) } -func (m *PodSchedulingList) XXX_Size() int { +func (m *PodSchedulingContextList) XXX_Size() int { return m.Size() } -func (m *PodSchedulingList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingList.DiscardUnknown(m) +func (m *PodSchedulingContextList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingList proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo -func (m *PodSchedulingSpec) Reset() { *m = PodSchedulingSpec{} } -func (*PodSchedulingSpec) ProtoMessage() {} -func (*PodSchedulingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{3} +func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} } +func (*PodSchedulingContextSpec) ProtoMessage() {} +func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{3} } -func (m *PodSchedulingSpec) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -146,27 +146,27 @@ func (m *PodSchedulingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, e } return b[:n], nil } -func (m *PodSchedulingSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingSpec.Merge(m, src) +func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src) } -func (m *PodSchedulingSpec) XXX_Size() int { +func (m *PodSchedulingContextSpec) XXX_Size() int { return m.Size() } -func (m *PodSchedulingSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingSpec.DiscardUnknown(m) +func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingSpec proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo -func (m *PodSchedulingStatus) Reset() { *m = PodSchedulingStatus{} } -func (*PodSchedulingStatus) ProtoMessage() {} -func (*PodSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{4} +func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } +func (*PodSchedulingContextStatus) ProtoMessage() {} +func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{4} } -func (m *PodSchedulingStatus) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -174,22 +174,22 @@ func (m *PodSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, } return b[:n], nil } -func (m *PodSchedulingStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingStatus.Merge(m, src) +func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src) } -func (m *PodSchedulingStatus) XXX_Size() int { +func (m *PodSchedulingContextStatus) XXX_Size() int { return m.Size() } -func (m *PodSchedulingStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingStatus.DiscardUnknown(m) +func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingStatus proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{5} + return fileDescriptor_3add37bbd52889e0, []int{5} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +217,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } func (*ResourceClaimConsumerReference) ProtoMessage() {} func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{6} + return fileDescriptor_3add37bbd52889e0, []int{6} } func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +245,7 @@ var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } func (*ResourceClaimList) ProtoMessage() {} func (*ResourceClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{7} + return fileDescriptor_3add37bbd52889e0, []int{7} } func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +273,7 @@ var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo func (m *ResourceClaimParametersReference) Reset() { *m = ResourceClaimParametersReference{} } func (*ResourceClaimParametersReference) ProtoMessage() {} func (*ResourceClaimParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{8} + return fileDescriptor_3add37bbd52889e0, []int{8} } func (m *ResourceClaimParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +301,7 @@ var xxx_messageInfo_ResourceClaimParametersReference proto.InternalMessageInfo func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } func (*ResourceClaimSchedulingStatus) ProtoMessage() {} func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{9} + return fileDescriptor_3add37bbd52889e0, []int{9} } func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +329,7 @@ var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } func (*ResourceClaimSpec) ProtoMessage() {} func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{10} + return fileDescriptor_3add37bbd52889e0, []int{10} } func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +357,7 @@ var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } func (*ResourceClaimStatus) ProtoMessage() {} func (*ResourceClaimStatus) Descriptor() 
([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{11} + return fileDescriptor_3add37bbd52889e0, []int{11} } func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +385,7 @@ var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } func (*ResourceClaimTemplate) ProtoMessage() {} func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{12} + return fileDescriptor_3add37bbd52889e0, []int{12} } func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +413,7 @@ var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } func (*ResourceClaimTemplateList) ProtoMessage() {} func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{13} + return fileDescriptor_3add37bbd52889e0, []int{13} } func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +441,7 @@ var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } func (*ResourceClaimTemplateSpec) ProtoMessage() {} func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{14} + return fileDescriptor_3add37bbd52889e0, []int{14} } func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +469,7 @@ var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo func (m *ResourceClass) Reset() { *m = ResourceClass{} } func (*ResourceClass) ProtoMessage() {} func (*ResourceClass) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{15} + return fileDescriptor_3add37bbd52889e0, []int{15} } func (m *ResourceClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,7 +497,7 @@ var xxx_messageInfo_ResourceClass proto.InternalMessageInfo func (m *ResourceClassList) Reset() { *m = ResourceClassList{} } func (*ResourceClassList) ProtoMessage() {} func (*ResourceClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{16} + return fileDescriptor_3add37bbd52889e0, []int{16} } func (m *ResourceClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -525,7 +525,7 @@ var xxx_messageInfo_ResourceClassList proto.InternalMessageInfo func (m *ResourceClassParametersReference) Reset() { *m = ResourceClassParametersReference{} } func (*ResourceClassParametersReference) ProtoMessage() {} func (*ResourceClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{17} + return fileDescriptor_3add37bbd52889e0, []int{17} } func (m *ResourceClassParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,107 +550,140 @@ func (m *ResourceClassParametersReference) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceClassParametersReference proto.InternalMessageInfo +func (m *ResourceHandle) Reset() { *m = ResourceHandle{} } +func (*ResourceHandle) ProtoMessage() {} +func (*ResourceHandle) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{18} +} +func (m *ResourceHandle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceHandle) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHandle.Merge(m, src) +} +func (m *ResourceHandle) XXX_Size() int { + return m.Size() +} +func (m *ResourceHandle) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceHandle.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHandle proto.InternalMessageInfo + func init() { - proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha1.AllocationResult") - proto.RegisterType((*PodScheduling)(nil), "k8s.io.api.resource.v1alpha1.PodScheduling") - proto.RegisterType((*PodSchedulingList)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingList") - proto.RegisterType((*PodSchedulingSpec)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingSpec") - proto.RegisterType((*PodSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingStatus") - proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaim") - proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimConsumerReference") - proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimList") - proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimParametersReference") - proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimSchedulingStatus") - proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimSpec") - proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimStatus") - proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplate") - proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplateList") - proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplateSpec") - proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha1.ResourceClass") - proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClassList") - proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClassParametersReference") + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult") + proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContext") + proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextList") + proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextSpec") + proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextStatus") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimList") + proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersReference") + proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSchedulingStatus") + proto.RegisterType((*ResourceClaimSpec)(nil), 
"k8s.io.api.resource.v1alpha2.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha2.ResourceClass") + proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassList") + proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersReference") + proto.RegisterType((*ResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.ResourceHandle") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha1/generated.proto", fileDescriptor_a66b2ee03d862be2) -} - -var fileDescriptor_a66b2ee03d862be2 = []byte{ - // 1174 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x6e, 0x95, 0x8c, 0x1b, 0x37, 0xd9, 0x34, 0xc8, 0x8d, 0x5a, 0xdb, 0xec, 0xc9, - 0x12, 0xb0, 0xdb, 0x04, 0x04, 0x15, 0x1f, 0x95, 0xb2, 0x0d, 0x94, 0x08, 0x9a, 0x9a, 0x31, 0x91, - 0x08, 0x42, 0x88, 0xf1, 0xee, 0xab, 0xbd, 0x64, 0xbf, 0xd8, 0xd9, 0x35, 0xaa, 0xb8, 0xf4, 0xca, - 0x0d, 0x21, 0xee, 0x1c, 0xf9, 0x43, 0x10, 0x52, 0x8e, 0x91, 0xe0, 0xd0, 0x93, 0x45, 0xcc, 0x81, - 0x3f, 0x80, 0x13, 0x3d, 0xa1, 0x19, 0xef, 0xae, 0x77, 0xd6, 0x1f, 0xc4, 0x11, 0x8a, 0xc2, 0x29, - 0x99, 0x79, 0xbf, 0xf7, 0x9b, 0xf7, 0x31, 0xef, 0xcd, 0x5b, 0xa3, 0x77, 0x8f, 0xee, 0x52, 0xd5, - 0xf2, 0xb4, 0xa3, 0xa8, 0x0d, 0x81, 0x0b, 0x21, 0x50, 0xad, 0x07, 0xae, 0xe9, 0x05, 0x5a, 0x2c, - 0x20, 0xbe, 0xa5, 0x05, 0x40, 0xbd, 0x28, 0x30, 0x40, 0xeb, 0x6d, 0x11, 0xdb, 0xef, 0x92, 0x2d, - 0xad, 0x03, 0x2e, 0x04, 0x24, 0x04, 0x53, 0xf5, 0x03, 0x2f, 0xf4, 0xe4, 0x5b, 0x43, 0xb4, 0x4a, - 0x7c, 0x4b, 0x4d, 0xd0, 0x6a, 0x82, 0xde, 0x7c, 0xa5, 0x63, 0x85, 0xdd, 0xa8, 0xad, 0x1a, 0x9e, - 0xa3, 0x75, 0xbc, 0x8e, 0xa7, 0x71, 0xa5, 0x76, 0xf4, 0x98, 0xaf, 0xf8, 0x82, 0xff, 0x37, 0x24, - 0xdb, 0x54, 0x32, 0x47, 0x1b, 0x5e, 0xc0, 0x8e, 0xcd, 0x1f, 0xb8, 0xf9, 0xda, 0x08, 0xe3, 0x10, - 0xa3, 0x6b, 0xb9, 0x10, 0x3c, 0xd1, 0xfc, 0xa3, 0x0e, 0xdb, 0xa0, 0x9a, 0x03, 0x21, 0x99, 0xa4, - 0xa5, 0x4d, 0xd3, 0x0a, 0x22, 0x37, 0xb4, 0x1c, 0x18, 0x53, 0x78, 0xfd, 0xdf, 0x14, 0xa8, 0xd1, - 0x05, 0x87, 0xe4, 0xf5, 0x94, 0x3f, 0x25, 0xb4, 0xba, 0x63, 0xdb, 0x9e, 0x41, 0x42, 0xcb, 0x73, - 0x31, 0xd0, 0xc8, 0x0e, 0xe5, 0x7b, 0xa8, 0x9c, 0xc4, 0xe6, 0x7d, 0xe2, 0x9a, 0x36, 0x54, 0xa4, - 0xba, 0xd4, 0x58, 0xd6, 0x5f, 0x38, 0xee, 0xd7, 0x16, 0x06, 0xfd, 0x5a, 0x19, 0x0b, 0x52, 0x9c, - 0x43, 0xcb, 0x6d, 0xb4, 0x4a, 0x7a, 0xc4, 0xb2, 0x49, 0xdb, 0x86, 0x47, 0xee, 0xbe, 0x67, 0x02, - 0xad, 0x2c, 0xd6, 0xa5, 0x46, 0x69, 0xbb, 0xae, 0x66, 0xe2, 0xcf, 0x42, 0xa6, 0xf6, 0xb6, 0x54, - 0x06, 0x68, 0x81, 0x0d, 0x46, 0xe8, 0x05, 0xfa, 0x8d, 0x41, 0xbf, 0xb6, 0xba, 0x93, 0xd3, 0xc6, - 0x63, 0x7c, 0xb2, 0x86, 0x96, 0x69, 0x97, 0x04, 0xc0, 0xf6, 0x2a, 0x85, 0xba, 0xd4, 0x58, 0xd2, - 0xd7, 0x62, 0xf3, 0x96, 0x5b, 0x89, 0x00, 0x8f, 0x30, 0xca, 0x8f, 0x8b, 0x68, 0xa5, 0xe9, 0x99, - 0x2d, 0xa3, 0x0b, 0x66, 0x64, 0x5b, 0x6e, 0x47, 0xfe, 0x02, 0x2d, 0xb1, 0xf8, 0x9b, 0x24, 0x24, - 0xdc, 0xc1, 0xd2, 0xf6, 0x9d, 0x8c, 0x79, 0x69, 0x18, 0x55, 
0xff, 0xa8, 0xc3, 0x36, 0xa8, 0xca, - 0xd0, 0xcc, 0xe0, 0x47, 0xed, 0x2f, 0xc1, 0x08, 0x1f, 0x42, 0x48, 0x74, 0x39, 0x3e, 0x13, 0x8d, - 0xf6, 0x70, 0xca, 0x2a, 0x7f, 0x84, 0x8a, 0xd4, 0x07, 0x23, 0x76, 0x5e, 0x53, 0x67, 0x5d, 0x3e, - 0x55, 0x30, 0xae, 0xe5, 0x83, 0xa1, 0x5f, 0x8b, 0xc9, 0x8b, 0x6c, 0x85, 0x39, 0x95, 0x7c, 0x88, - 0xae, 0xd2, 0x90, 0x84, 0x11, 0xe5, 0x4e, 0x97, 0xb6, 0xb7, 0xe6, 0x21, 0xe5, 0x8a, 0x7a, 0x39, - 0xa6, 0xbd, 0x3a, 0x5c, 0xe3, 0x98, 0x50, 0xf9, 0x59, 0x42, 0x6b, 0x02, 0xfe, 0x43, 0x8b, 0x86, - 0xf2, 0x67, 0x63, 0x51, 0x52, 0xcf, 0x16, 0x25, 0xa6, 0xcd, 0x63, 0xb4, 0x1a, 0x9f, 0xb7, 0x94, - 0xec, 0x64, 0x22, 0xd4, 0x44, 0x57, 0xac, 0x10, 0x1c, 0x76, 0x3f, 0x0a, 0x8d, 0xd2, 0xf6, 0x4b, - 0x73, 0x78, 0xa3, 0xaf, 0xc4, 0xbc, 0x57, 0xf6, 0x18, 0x03, 0x1e, 0x12, 0x29, 0xdf, 0xe6, 0xbd, - 0x60, 0xc1, 0x93, 0xef, 0xa2, 0x6b, 0x94, 0x5f, 0x31, 0x30, 0xd9, 0xfd, 0x89, 0x2f, 0xf4, 0x8d, - 0x98, 0xe1, 0x5a, 0x2b, 0x23, 0xc3, 0x02, 0x52, 0x7e, 0x13, 0x95, 0x7d, 0x2f, 0x04, 0x37, 0xb4, - 0x88, 0x9d, 0x5c, 0xe5, 0x42, 0x63, 0x59, 0x97, 0x59, 0x21, 0x34, 0x05, 0x09, 0xce, 0x21, 0x95, - 0xef, 0x25, 0xb4, 0x3e, 0x21, 0x03, 0xf2, 0x37, 0xa3, 0x02, 0xbb, 0x6f, 0x13, 0xcb, 0xa1, 0x15, - 0x89, 0xbb, 0xff, 0xd6, 0x6c, 0xf7, 0x71, 0x56, 0x67, 0x2c, 0xad, 0x63, 0xd5, 0x39, 0xa4, 0xc6, - 0xb9, 0xa3, 0x78, 0x21, 0x08, 0x90, 0xcb, 0x56, 0x08, 0xa2, 0x9b, 0xff, 0x51, 0x21, 0x88, 0xa4, - 0xb3, 0x0b, 0x61, 0x20, 0xa1, 0xaa, 0x80, 0xbf, 0xef, 0xb9, 0x34, 0x72, 0x20, 0xc0, 0xf0, 0x18, - 0x02, 0x70, 0x0d, 0x90, 0x5f, 0x46, 0x4b, 0xc4, 0xb7, 0x1e, 0x04, 0x5e, 0xe4, 0xc7, 0x77, 0x29, - 0xbd, 0xe5, 0x3b, 0xcd, 0x3d, 0xbe, 0x8f, 0x53, 0x04, 0x43, 0x27, 0x16, 0x71, 0x6b, 0x33, 0xe8, - 0xe4, 0x1c, 0x9c, 0x22, 0xe4, 0x3a, 0x2a, 0xba, 0xc4, 0x81, 0x4a, 0x91, 0x23, 0x53, 0xdf, 0xf7, - 0x89, 0x03, 0x98, 0x4b, 0x64, 0x1d, 0x15, 0x22, 0xcb, 0xac, 0x5c, 0xe1, 0x80, 0x3b, 0x31, 0xa0, - 0x70, 0xb0, 0xb7, 0xfb, 0xbc, 0x5f, 0x7b, 0x71, 0xda, 0x4b, 0x10, 0x3e, 0xf1, 0x81, 0xaa, 0x07, - 0x7b, 0xbb, 0x98, 0x29, 0xf3, 0x6a, 0x17, 0x9c, 0xbc, 0x74, 0xd5, 0x2e, 0x58, 0x37, 0xa5, 0xda, - 0x7f, 0x90, 0x50, 0x5d, 0xc0, 0x35, 0x49, 0x40, 0x1c, 0x08, 0x21, 0xa0, 0xe7, 0x4d, 0x56, 0x1d, - 0x15, 0x8f, 0x2c, 0xd7, 0xe4, 0x77, 0x35, 0x13, 0xfe, 0x0f, 0x2c, 0xd7, 0xc4, 0x5c, 0x92, 0x26, - 0xa8, 0x30, 0x2d, 0x41, 0xca, 0x53, 0x09, 0xdd, 0x9e, 0x59, 0xad, 0x29, 0x87, 0x34, 0x35, 0xc9, - 0xef, 0xa0, 0xeb, 0x91, 0x4b, 0x23, 0x2b, 0x64, 0xcf, 0x57, 0xb6, 0xf3, 0xac, 0x0f, 0xfa, 0xb5, - 0xeb, 0x07, 0xa2, 0x08, 0xe7, 0xb1, 0xca, 0x4f, 0x8b, 0xb9, 0xfc, 0xf2, 0x3e, 0xf8, 0x00, 0xad, - 0x65, 0xda, 0x01, 0xa5, 0xfb, 0x23, 0x1b, 0x6e, 0xc6, 0x36, 0x64, 0xb5, 0x86, 0x00, 0x3c, 0xae, - 0x23, 0x7f, 0x8d, 0x56, 0xfc, 0x6c, 0xa8, 0xe3, 0xd2, 0xbe, 0x37, 0x47, 0x4a, 0x27, 0xa4, 0x4a, - 0x5f, 0x1b, 0xf4, 0x6b, 0x2b, 0x82, 0x00, 0x8b, 0xe7, 0xc8, 0x4d, 0x54, 0x26, 0xe9, 0xc0, 0xf2, - 0x90, 0xf5, 0xf2, 0x61, 0x1a, 0x1a, 0x49, 0xfb, 0xdb, 0x11, 0xa4, 0xcf, 0xc7, 0x76, 0x70, 0x4e, - 0x5f, 0xf9, 0x6b, 0x11, 0xad, 0x4f, 0x68, 0x0f, 0xf2, 0x36, 0x42, 0x66, 0x60, 0xf5, 0x20, 0xc8, - 0x04, 0x29, 0x6d, 0x73, 0xbb, 0xa9, 0x04, 0x67, 0x50, 0xf2, 0xe7, 0x08, 0x8d, 0xd8, 0xe3, 0x98, - 0xa8, 0xb3, 0x63, 0x92, 0x1f, 0xbf, 0xf4, 0x32, 0xe3, 0xcf, 0xec, 0x66, 0x18, 0x65, 0x8a, 0x4a, - 0x01, 0x50, 0x08, 0x7a, 0x60, 0xbe, 0xe7, 0x05, 0x95, 0x02, 0xaf, 0xa3, 0xb7, 0xe7, 0x08, 0xfa, - 0x58, 0x2b, 0xd3, 0xd7, 0x63, 0x97, 0x4a, 0x78, 0x44, 0x8c, 0xb3, 0xa7, 0xc8, 0x2d, 0xb4, 0x61, - 0x02, 0xc9, 0x98, 0xf9, 0x55, 0x04, 0x34, 0x04, 0x93, 0x77, 0xa8, 0x25, 0xfd, 0x76, 
0x4c, 0xb0, - 0xb1, 0x3b, 0x09, 0x84, 0x27, 0xeb, 0x2a, 0xbf, 0x49, 0x68, 0x43, 0xb0, 0xec, 0x63, 0x70, 0x7c, - 0x9b, 0x84, 0x70, 0x01, 0xcf, 0xd1, 0xa1, 0xf0, 0x1c, 0xbd, 0x31, 0x47, 0xf8, 0x12, 0x23, 0xa7, - 0x3d, 0x4b, 0xca, 0xaf, 0x12, 0xba, 0x39, 0x51, 0xe3, 0x02, 0xda, 0xeb, 0x27, 0x62, 0x7b, 0x7d, - 0xf5, 0x1c, 0x7e, 0x4d, 0x69, 0xb3, 0x27, 0xd3, 0xbc, 0xe2, 0x4d, 0xe5, 0xff, 0x38, 0x3f, 0x28, - 0x7f, 0x8b, 0x63, 0x10, 0xa5, 0x17, 0xe0, 0x86, 0xd8, 0x51, 0x16, 0xcf, 0xd4, 0x51, 0xc6, 0x1a, - 0x6d, 0x61, 0xce, 0x46, 0x4b, 0xe9, 0xf9, 0x1a, 0xed, 0x21, 0x5a, 0x11, 0x5f, 0x9f, 0xe2, 0x19, - 0x3f, 0xe1, 0x38, 0x75, 0x4b, 0x78, 0x9d, 0x44, 0xa6, 0xfc, 0xec, 0x41, 0xe9, 0x65, 0x9e, 0x3d, - 0x28, 0x9d, 0x52, 0x14, 0xbf, 0x88, 0xb3, 0xc7, 0xc4, 0x38, 0x5f, 0xfc, 0xec, 0xc1, 0xbe, 0x8c, - 0xd9, 0x5f, 0xea, 0x13, 0x23, 0x99, 0x21, 0xd3, 0x2f, 0xe3, 0xfd, 0x44, 0x80, 0x47, 0x18, 0x5d, - 0x3f, 0x3e, 0xad, 0x2e, 0x9c, 0x9c, 0x56, 0x17, 0x9e, 0x9d, 0x56, 0x17, 0x9e, 0x0e, 0xaa, 0xd2, - 0xf1, 0xa0, 0x2a, 0x9d, 0x0c, 0xaa, 0xd2, 0xb3, 0x41, 0x55, 0xfa, 0x7d, 0x50, 0x95, 0xbe, 0xfb, - 0xa3, 0xba, 0xf0, 0xe9, 0xad, 0x59, 0xbf, 0xb3, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x0a, - 0x8b, 0x49, 0x9f, 0x11, 0x00, 0x00, + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto", fileDescriptor_3add37bbd52889e0) +} + +var fileDescriptor_3add37bbd52889e0 = []byte{ + // 1233 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xda, 0x6e, 0x95, 0x4c, 0x1a, 0x37, 0xd9, 0xb6, 0xe0, 0x46, 0xad, 0x63, 0xf6, 0x14, + 0x89, 0xb2, 0xdb, 0x06, 0x54, 0x2a, 0xfe, 0x49, 0xd9, 0x06, 0x4a, 0x04, 0x4d, 0xc3, 0x98, 0x8a, + 0x16, 0x21, 0xd4, 0xc9, 0xee, 0xab, 0xbd, 0x64, 0xff, 0xb1, 0x33, 0x6b, 0xa8, 0xb8, 0xf4, 0x23, + 0xf4, 0xc0, 0x01, 0x4e, 0x1c, 0xf9, 0x02, 0x7c, 0x03, 0x84, 0xd4, 0x63, 0x11, 0x1c, 0x7a, 0xb2, + 0xa8, 0xf9, 0x08, 0x9c, 0xe8, 0x09, 0xcd, 0x78, 0x77, 0xbd, 0xb3, 0xf6, 0x9a, 0x38, 0x07, 0x0b, + 0x4e, 0xc9, 0xcc, 0xfb, 0xbd, 0xdf, 0xfb, 0x37, 0xef, 0xcd, 0xac, 0xd1, 0xbb, 0x87, 0xd7, 0xa8, + 0xee, 0x04, 0xc6, 0x61, 0x7c, 0x00, 0x91, 0x0f, 0x0c, 0xa8, 0xd1, 0x03, 0xdf, 0x0e, 0x22, 0x23, + 0x11, 0x90, 0xd0, 0x31, 0x22, 0xa0, 0x41, 0x1c, 0x59, 0x60, 0xf4, 0xae, 0x10, 0x37, 0xec, 0x92, + 0x2d, 0xa3, 0x03, 0x3e, 0x44, 0x84, 0x81, 0xad, 0x87, 0x51, 0xc0, 0x02, 0xf5, 0xc2, 0x10, 0xad, + 0x93, 0xd0, 0xd1, 0x53, 0xb4, 0x9e, 0xa2, 0xd7, 0x5f, 0xe9, 0x38, 0xac, 0x1b, 0x1f, 0xe8, 0x56, + 0xe0, 0x19, 0x9d, 0xa0, 0x13, 0x18, 0x42, 0xe9, 0x20, 0xbe, 0x2f, 0x56, 0x62, 0x21, 0xfe, 0x1b, + 0x92, 0xad, 0x6b, 0x39, 0xd3, 0x56, 0x10, 0x71, 0xb3, 0x45, 0x83, 0xeb, 0xaf, 0x8d, 0x30, 0x1e, + 0xb1, 0xba, 0x8e, 0x0f, 0xd1, 0x03, 0x23, 0x3c, 0xec, 0xf0, 0x0d, 0x6a, 0x78, 0xc0, 0xc8, 0x24, + 0x2d, 0xa3, 0x4c, 0x2b, 0x8a, 0x7d, 0xe6, 0x78, 0x30, 0xa6, 0x70, 0xf5, 0xdf, 0x14, 0xa8, 0xd5, + 0x05, 0x8f, 0x14, 0xf5, 0xb4, 0xef, 0x2a, 0x68, 0x75, 0xdb, 0x75, 0x03, 0x8b, 0x30, 0x27, 0xf0, + 0x31, 0xd0, 0xd8, 0x65, 0x6a, 0x80, 0x4e, 0xa7, 0xb9, 0x79, 0x9f, 0xf8, 0xb6, 0x0b, 0xb4, 0xa1, + 0xb4, 0xaa, 0x9b, 0xcb, 0x5b, 0x97, 0xf4, 0x69, 0xe9, 0xd3, 0xb1, 0xa4, 0x64, 0xbe, 0xf8, 0xb8, + 0xbf, 0xb1, 0x30, 0xe8, 0x6f, 0x9c, 0x96, 0xf7, 0x29, 0x2e, 0xb2, 0xab, 0x07, 0x68, 0x95, 0xf4, + 0x88, 0xe3, 0x92, 0x03, 0x17, 0x6e, 0xf9, 0x7b, 0x81, 0x0d, 0xb4, 0x51, 0x69, 0x29, 0x9b, 0xcb, + 0x5b, 0xad, 0xbc, 0x45, 0x9e, 0x63, 0xbd, 0x77, 0x45, 0xe7, 0x80, 0x36, 0xb8, 0x60, 0xb1, 0x20, + 0x32, 0xcf, 0x0e, 0xfa, 0x1b, 0xab, 
0xdb, 0x05, 0x6d, 0x3c, 0xc6, 0xa7, 0x1a, 0x68, 0x89, 0x76, + 0x49, 0x04, 0x7c, 0xaf, 0x51, 0x6d, 0x29, 0x9b, 0x8b, 0xe6, 0x5a, 0xe2, 0xe0, 0x52, 0x3b, 0x15, + 0xe0, 0x11, 0x46, 0xfb, 0xa9, 0x82, 0xce, 0xee, 0x07, 0x76, 0xdb, 0xea, 0x82, 0x1d, 0xbb, 0x8e, + 0xdf, 0xb9, 0x1e, 0xf8, 0x0c, 0xbe, 0x66, 0xea, 0x3d, 0xb4, 0xc8, 0xeb, 0x66, 0x13, 0x46, 0x1a, + 0x8a, 0xf0, 0xf2, 0x72, 0xce, 0xcb, 0x2c, 0xfd, 0x7a, 0x78, 0xd8, 0xe1, 0x1b, 0x54, 0xe7, 0x68, + 0xee, 0xf7, 0xad, 0x83, 0x2f, 0xc0, 0x62, 0x37, 0x81, 0x11, 0x53, 0x4d, 0x4c, 0xa3, 0xd1, 0x1e, + 0xce, 0x58, 0xd5, 0x3b, 0xa8, 0x46, 0x43, 0xb0, 0x92, 0x1c, 0x5c, 0x9d, 0x9e, 0xf5, 0x49, 0x3e, + 0xb6, 0x43, 0xb0, 0xcc, 0x53, 0x89, 0x8d, 0x1a, 0x5f, 0x61, 0xc1, 0xa8, 0xde, 0x43, 0x27, 0x29, + 0x23, 0x2c, 0xa6, 0x22, 0x05, 0xcb, 0x5b, 0xd7, 0x8e, 0xc1, 0x2d, 0xf4, 0xcd, 0x7a, 0xc2, 0x7e, + 0x72, 0xb8, 0xc6, 0x09, 0xaf, 0xf6, 0xab, 0x82, 0x1a, 0x93, 0xd4, 0x3e, 0x74, 0x28, 0x53, 0x3f, + 0x1b, 0x4b, 0x9d, 0x7e, 0xb4, 0xd4, 0x71, 0x6d, 0x91, 0xb8, 0xd5, 0xc4, 0xec, 0x62, 0xba, 0x93, + 0x4b, 0xdb, 0x27, 0xe8, 0x84, 0xc3, 0xc0, 0xe3, 0x67, 0x87, 0x9f, 0xd6, 0xad, 0xd9, 0x63, 0x33, + 0x57, 0x12, 0xfa, 0x13, 0xbb, 0x9c, 0x08, 0x0f, 0xf9, 0xb4, 0x47, 0x25, 0x31, 0xf1, 0xc4, 0xaa, + 0xd7, 0xd0, 0x29, 0x2a, 0x0e, 0x23, 0xd8, 0xfc, 0xa4, 0x89, 0xb8, 0x96, 0xcc, 0xb3, 0x09, 0xd1, + 0xa9, 0x76, 0x4e, 0x86, 0x25, 0xa4, 0xfa, 0x06, 0xaa, 0x87, 0x01, 0x03, 0x9f, 0x39, 0xc4, 0x4d, + 0x0f, 0x7d, 0x75, 0x73, 0xc9, 0x54, 0x07, 0xfd, 0x8d, 0xfa, 0xbe, 0x24, 0xc1, 0x05, 0xa4, 0xf6, + 0xbd, 0x82, 0xd6, 0xcb, 0xab, 0xa3, 0x7e, 0x83, 0xea, 0x69, 0xc4, 0xd7, 0x5d, 0xe2, 0x78, 0x69, + 0x07, 0xbf, 0x79, 0xb4, 0x0e, 0x16, 0x3a, 0x23, 0xee, 0xa4, 0xe4, 0x2f, 0x24, 0x31, 0xd5, 0x25, + 0x18, 0xc5, 0x05, 0x53, 0xda, 0x0f, 0x15, 0xb4, 0x22, 0x41, 0xe6, 0xd0, 0x32, 0x1f, 0x49, 0x2d, + 0x63, 0xcc, 0x12, 0x66, 0x59, 0xaf, 0xdc, 0x2d, 0xf4, 0xca, 0x95, 0x59, 0x48, 0xa7, 0x37, 0xc9, + 0x40, 0x41, 0x4d, 0x09, 0x7f, 0x3d, 0xf0, 0x69, 0xec, 0x41, 0x84, 0xe1, 0x3e, 0x44, 0xe0, 0x5b, + 0xa0, 0x5e, 0x42, 0x8b, 0x24, 0x74, 0x6e, 0x44, 0x41, 0x1c, 0x26, 0x47, 0x2a, 0x3b, 0xfa, 0xdb, + 0xfb, 0xbb, 0x62, 0x1f, 0x67, 0x08, 0x8e, 0x4e, 0x3d, 0x12, 0xde, 0xe6, 0xd0, 0xa9, 0x1d, 0x9c, + 0x21, 0xd4, 0x16, 0xaa, 0xf9, 0xc4, 0x83, 0x46, 0x4d, 0x20, 0xb3, 0xd8, 0xf7, 0x88, 0x07, 0x58, + 0x48, 0x54, 0x13, 0x55, 0x63, 0xc7, 0x6e, 0x9c, 0x10, 0x80, 0xcb, 0x09, 0xa0, 0x7a, 0x7b, 0x77, + 0xe7, 0x79, 0x7f, 0xe3, 0xa5, 0xb2, 0xbb, 0x86, 0x3d, 0x08, 0x81, 0xea, 0xb7, 0x77, 0x77, 0x30, + 0x57, 0xd6, 0x7e, 0x56, 0xd0, 0x9a, 0x14, 0xe4, 0x1c, 0x46, 0xc0, 0xbe, 0x3c, 0x02, 0x5e, 0x9e, + 0xa1, 0x64, 0x25, 0xbd, 0xff, 0xad, 0x82, 0x5a, 0x12, 0x6e, 0x9f, 0x44, 0xc4, 0x03, 0x06, 0x11, + 0x3d, 0x6e, 0xb1, 0x5a, 0xa8, 0x76, 0xe8, 0xf8, 0xb6, 0x38, 0xab, 0xb9, 0xf4, 0x7f, 0xe0, 0xf8, + 0x36, 0x16, 0x92, 0xac, 0x40, 0xd5, 0xb2, 0x02, 0x69, 0x0f, 0x15, 0x74, 0x71, 0x6a, 0xb7, 0x66, + 0x1c, 0x4a, 0x69, 0x91, 0xdf, 0x46, 0xa7, 0x63, 0x9f, 0xc6, 0x0e, 0xe3, 0xf7, 0x5d, 0x7e, 0x00, + 0x9d, 0xe1, 0xb7, 0xf6, 0x6d, 0x59, 0x84, 0x8b, 0x58, 0xed, 0xc7, 0x4a, 0xa1, 0xbe, 0x62, 0x1c, + 0xde, 0x40, 0x6b, 0xb9, 0x71, 0x40, 0xe9, 0xde, 0xc8, 0x87, 0xf3, 0x89, 0x0f, 0x79, 0xad, 0x21, + 0x00, 0x8f, 0xeb, 0xa8, 0x5f, 0xa1, 0x95, 0x30, 0x9f, 0xea, 0xa4, 0xb5, 0xdf, 0x99, 0xa1, 0xa4, + 0x13, 0x4a, 0x65, 0xae, 0x0d, 0xfa, 0x1b, 0x2b, 0x92, 0x00, 0xcb, 0x76, 0xd4, 0x7d, 0x54, 0x27, + 0xd9, 0x93, 0xe8, 0x26, 0x1f, 0xe9, 0xc3, 0x32, 0x6c, 0xa6, 0xe3, 0x6f, 0x5b, 0x92, 0x3e, 0x1f, + 0xdb, 0xc1, 0x05, 0x7d, 0xed, 0xaf, 0x0a, 0x3a, 0x33, 0x61, 
0x3c, 0xa8, 0x5b, 0x08, 0xd9, 0x91, + 0xd3, 0x83, 0x28, 0x97, 0xa4, 0x6c, 0xcc, 0xed, 0x64, 0x12, 0x9c, 0x43, 0xa9, 0x9f, 0x23, 0x34, + 0x62, 0x4f, 0x72, 0xa2, 0x4f, 0xcf, 0x49, 0xf1, 0x81, 0x67, 0xd6, 0x39, 0x7f, 0x6e, 0x37, 0xc7, + 0xa8, 0x52, 0xb4, 0x1c, 0x01, 0x85, 0xa8, 0x07, 0xf6, 0x7b, 0x41, 0xd4, 0xa8, 0x8a, 0x3e, 0x7a, + 0x6b, 0x86, 0xa4, 0x8f, 0x8d, 0x32, 0xf3, 0x4c, 0x12, 0xd2, 0x32, 0x1e, 0x11, 0xe3, 0xbc, 0x15, + 0xb5, 0x8d, 0xce, 0xd9, 0x40, 0x72, 0x6e, 0x7e, 0x19, 0x03, 0x65, 0x60, 0x8b, 0x09, 0xb5, 0x68, + 0x5e, 0x4c, 0x08, 0xce, 0xed, 0x4c, 0x02, 0xe1, 0xc9, 0xba, 0xda, 0xef, 0x0a, 0x3a, 0x27, 0x79, + 0xf6, 0x31, 0x78, 0xa1, 0x4b, 0x18, 0xcc, 0xe1, 0x3a, 0xba, 0x2b, 0x5d, 0x47, 0xaf, 0xcf, 0x90, + 0xbe, 0xd4, 0xc9, 0xb2, 0x6b, 0x49, 0xfb, 0x4d, 0x41, 0xe7, 0x27, 0x6a, 0xcc, 0x61, 0xbc, 0xde, + 0x91, 0xc7, 0xeb, 0xab, 0xc7, 0x88, 0xab, 0x64, 0xcc, 0x3e, 0x29, 0x8b, 0xaa, 0x3d, 0x7c, 0xb6, + 0xfe, 0xff, 0xde, 0x0f, 0xda, 0xdf, 0xf2, 0x33, 0x88, 0xd2, 0x39, 0x84, 0x21, 0x4f, 0x94, 0xca, + 0x91, 0x26, 0xca, 0xd8, 0xa0, 0xad, 0xce, 0x38, 0x68, 0x29, 0x3d, 0xde, 0xa0, 0xbd, 0x8b, 0x56, + 0xe4, 0xdb, 0xa7, 0x76, 0xc4, 0x6f, 0x3e, 0x41, 0xdd, 0x96, 0x6e, 0x27, 0x99, 0xa9, 0xf8, 0xf6, + 0xa0, 0xf4, 0xbf, 0xfc, 0xf6, 0xa0, 0xb4, 0xa4, 0x29, 0x7e, 0x91, 0xdf, 0x1e, 0x13, 0xf3, 0x3c, + 0xff, 0xb7, 0x07, 0xff, 0x94, 0xe6, 0x7f, 0x69, 0x48, 0xac, 0xf4, 0x0d, 0x99, 0x7d, 0x4a, 0xef, + 0xa5, 0x02, 0x3c, 0xc2, 0x68, 0xf7, 0x51, 0x5d, 0xfe, 0x0d, 0xe0, 0x58, 0x37, 0x5f, 0x0b, 0xd5, + 0x44, 0xe5, 0x0a, 0xae, 0xef, 0x10, 0x46, 0xb0, 0x90, 0x98, 0xe6, 0xe3, 0x67, 0xcd, 0x85, 0x27, + 0xcf, 0x9a, 0x0b, 0x4f, 0x9f, 0x35, 0x17, 0x1e, 0x0e, 0x9a, 0xca, 0xe3, 0x41, 0x53, 0x79, 0x32, + 0x68, 0x2a, 0x4f, 0x07, 0x4d, 0xe5, 0x8f, 0x41, 0x53, 0x79, 0xf4, 0x67, 0x73, 0xe1, 0xd3, 0x0b, + 0xd3, 0x7e, 0x31, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x67, 0xe4, 0xf6, 0x18, 0x69, 0x12, 0x00, + 0x00, } func (m *AllocationResult) Marshal() (dAtA []byte, err error) { @@ -693,15 +726,24 @@ func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - i -= len(m.ResourceHandle) - copy(dAtA[i:], m.ResourceHandle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceHandle))) - i-- - dAtA[i] = 0xa + if len(m.ResourceHandles) > 0 { + for iNdEx := len(m.ResourceHandles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceHandles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *PodScheduling) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -711,12 +753,12 @@ func (m *PodScheduling) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodScheduling) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -754,7 +796,7 @@ func (m *PodScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSchedulingList) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -764,12 +806,12 @@ func (m *PodSchedulingList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingList) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -801,7 +843,7 @@ func (m *PodSchedulingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSchedulingSpec) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -811,12 +853,12 @@ func (m *PodSchedulingSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -838,7 +880,7 @@ func (m *PodSchedulingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSchedulingStatus) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -848,12 +890,12 @@ func (m *PodSchedulingStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1485,6 +1527,39 @@ func (m *ResourceClassParametersReference) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } +func (m *ResourceHandle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceHandle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -1502,8 +1577,12 @@ func (m *AllocationResult) Size() (n int) { } var l int _ = l - l = len(m.ResourceHandle) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ResourceHandles) > 0 { + for _, e := range 
m.ResourceHandles { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } if m.AvailableOnNodes != nil { l = m.AvailableOnNodes.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -1512,7 +1591,7 @@ func (m *AllocationResult) Size() (n int) { return n } -func (m *PodScheduling) Size() (n int) { +func (m *PodSchedulingContext) Size() (n int) { if m == nil { return 0 } @@ -1527,7 +1606,7 @@ func (m *PodScheduling) Size() (n int) { return n } -func (m *PodSchedulingList) Size() (n int) { +func (m *PodSchedulingContextList) Size() (n int) { if m == nil { return 0 } @@ -1544,7 +1623,7 @@ func (m *PodSchedulingList) Size() (n int) { return n } -func (m *PodSchedulingSpec) Size() (n int) { +func (m *PodSchedulingContextSpec) Size() (n int) { if m == nil { return 0 } @@ -1561,7 +1640,7 @@ func (m *PodSchedulingSpec) Size() (n int) { return n } -func (m *PodSchedulingStatus) Size() (n int) { +func (m *PodSchedulingContextStatus) Size() (n int) { if m == nil { return 0 } @@ -1794,6 +1873,19 @@ func (m *ResourceClassParametersReference) Size() (n int) { return n } +func (m *ResourceHandle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func sovGenerated(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1804,54 +1896,59 @@ func (this *AllocationResult) String() string { if this == nil { return "nil" } + repeatedStringForResourceHandles := "[]ResourceHandle{" + for _, f := range this.ResourceHandles { + repeatedStringForResourceHandles += strings.Replace(strings.Replace(f.String(), "ResourceHandle", "ResourceHandle", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceHandles += "}" s := strings.Join([]string{`&AllocationResult{`, - `ResourceHandle:` + fmt.Sprintf("%v", this.ResourceHandle) + `,`, + `ResourceHandles:` + repeatedStringForResourceHandles + `,`, `AvailableOnNodes:` + strings.Replace(fmt.Sprintf("%v", this.AvailableOnNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, `Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`, `}`, }, "") return s } -func (this *PodScheduling) String() string { +func (this *PodSchedulingContext) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PodScheduling{`, + s := strings.Join([]string{`&PodSchedulingContext{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingSpec", "PodSchedulingSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingStatus", "PodSchedulingStatus", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *PodSchedulingList) String() string { +func (this *PodSchedulingContextList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]PodScheduling{" + repeatedStringForItems := "[]PodSchedulingContext{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodScheduling", "PodScheduling", 1), `&`, ``, 1) + "," + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSchedulingList{`, + s := strings.Join([]string{`&PodSchedulingContextList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *PodSchedulingSpec) String() string { +func (this *PodSchedulingContextSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PodSchedulingSpec{`, + s := strings.Join([]string{`&PodSchedulingContextSpec{`, `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, `}`, }, "") return s } -func (this *PodSchedulingStatus) String() string { +func (this *PodSchedulingContextStatus) String() string { if this == nil { return "nil" } @@ -1860,7 +1957,7 @@ func (this *PodSchedulingStatus) String() string { repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," } repeatedStringForResourceClaims += "}" - s := strings.Join([]string{`&PodSchedulingStatus{`, + s := strings.Join([]string{`&PodSchedulingContextStatus{`, `ResourceClaims:` + repeatedStringForResourceClaims + `,`, `}`, }, "") @@ -2040,6 +2137,17 @@ func (this *ResourceClassParametersReference) String() string { }, "") return s } +func (this *ResourceHandle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceHandle{`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -2079,9 +2187,9 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandles", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2091,23 +2199,25 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceHandle = string(dAtA[iNdEx:postIndex]) + m.ResourceHandles = append(m.ResourceHandles, ResourceHandle{}) + if err := m.ResourceHandles[len(m.ResourceHandles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { @@ -2186,7 +2296,7 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodScheduling) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2209,10 +2319,10 @@ func (m *PodScheduling) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: PodScheduling: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodScheduling: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2335,7 +2445,7 @@ func (m *PodScheduling) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2358,10 +2468,10 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingList: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2426,7 +2536,7 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, PodScheduling{}) + m.Items = append(m.Items, PodSchedulingContext{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2452,7 +2562,7 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2475,10 +2585,10 @@ func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingSpec: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2566,7 +2676,7 @@ func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingStatus) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2589,10 +2699,10 @@ func (m *PodSchedulingStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingStatus: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4507,6 +4617,120 @@ func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceHandle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceHandle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/resource/v1alpha1/generated.proto b/vendor/k8s.io/api/resource/v1alpha2/generated.proto similarity index 79% rename from vendor/k8s.io/api/resource/v1alpha1/generated.proto rename to vendor/k8s.io/api/resource/v1alpha2/generated.proto index 2e814d15..02412398 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.proto @@ -19,7 +19,7 @@ limitations under the License. syntax = "proto2"; -package k8s.io.api.resource.v1alpha1; +package k8s.io.api.resource.v1alpha2; import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; @@ -27,23 +27,30 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "k8s.io/api/resource/v1alpha1"; +option go_package = "k8s.io/api/resource/v1alpha2"; -// AllocationResult contains attributed of an allocated resource. +// AllocationResult contains attributes of an allocated resource. message AllocationResult { - // ResourceHandle contains arbitrary data returned by the driver after a - // successful allocation. 
This is opaque for - // Kubernetes. Driver documentation may explain to users how to - // interpret this data if needed. + // ResourceHandles contain the state associated with an allocation that + // should be maintained throughout the lifetime of a claim. Each + // ResourceHandle contains data that should be passed to a specific kubelet + // plugin once it lands on a node. This data is returned by the driver + // after a successful allocation and is opaque to Kubernetes. Driver + // documentation may explain to users how to interpret this data if needed. // - // The maximum size of this field is 16KiB. This may get - // increased in the future, but not reduced. + // Setting this field is optional. It has a maximum size of 32 entries. + // If null (or empty), it is assumed this allocation will be processed by a + // single kubelet plugin with no ResourceHandle data attached. The name of + // the kubelet plugin invoked will match the DriverName set in the + // ResourceClaimStatus this AllocationResult is embedded in. + // + // +listType=atomic // +optional - optional string resourceHandle = 1; + repeated ResourceHandle resourceHandles = 1; - // This field will get set by the resource driver after it has - // allocated the resource driver to inform the scheduler where it can - // schedule Pods using the ResourceClaim. + // This field will get set by the resource driver after it has allocated + // the resource to inform the scheduler where it can schedule Pods using + // the ResourceClaim. // // Setting this field is optional. If null, the resource is available // everywhere. @@ -56,37 +63,37 @@ message AllocationResult { optional bool shareable = 3; } -// PodScheduling objects hold information that is needed to schedule +// PodSchedulingContext objects hold information that is needed to schedule // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation // mode. // // This is an alpha type and requires enabling the DynamicResourceAllocation // feature gate. -message PodScheduling { +message PodSchedulingContext { // Standard object metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec describes where resources for the Pod are needed. - optional PodSchedulingSpec spec = 2; + optional PodSchedulingContextSpec spec = 2; // Status describes where resources for the Pod can be allocated. // +optional - optional PodSchedulingStatus status = 3; + optional PodSchedulingContextStatus status = 3; } -// PodSchedulingList is a collection of Pod scheduling objects. -message PodSchedulingList { +// PodSchedulingContextList is a collection of Pod scheduling objects. +message PodSchedulingContextList { // Standard list metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of PodScheduling objects. - repeated PodScheduling items = 2; + // Items is the list of PodSchedulingContext objects. + repeated PodSchedulingContext items = 2; } -// PodSchedulingSpec describes where resources for the Pod are needed. -message PodSchedulingSpec { +// PodSchedulingContextSpec describes where resources for the Pod are needed. +message PodSchedulingContextSpec { // SelectedNode is the node for which allocation of ResourceClaims that // are referenced by the Pod and that use "WaitForFirstConsumer" // allocation is to be attempted. @@ -105,8 +112,8 @@ message PodSchedulingSpec { repeated string potentialNodes = 2; } -// PodSchedulingStatus describes where resources for the Pod can be allocated. 
-message PodSchedulingStatus { +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +message PodSchedulingContextStatus { // ResourceClaims describes resource availability for each // pod.spec.resourceClaim entry where the corresponding ResourceClaim // uses "WaitForFirstConsumer" allocation mode. @@ -235,9 +242,9 @@ message ResourceClaimStatus { // +optional optional string driverName = 1; - // Allocation is set by the resource driver once a resource has been - // allocated successfully. If this is not specified, the resource is - // not yet allocated. + // Allocation is set by the resource driver once a resource or set of + // resources has been allocated successfully. If this is not specified, the + // resources have not been allocated yet. // +optional optional AllocationResult allocation = 2; @@ -370,3 +377,24 @@ message ResourceClassParametersReference { optional string namespace = 4; } +// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. +message ResourceHandle { + // DriverName specifies the name of the resource driver whose kubelet + // plugin should be invoked to process this ResourceHandle's data once it + // lands on a node. This may differ from the DriverName set in + // ResourceClaimStatus this ResourceHandle is embedded in. + optional string driverName = 1; + + // Data contains the opaque data associated with this ResourceHandle. It is + // set by the controller component of the resource driver whose name + // matches the DriverName set in the ResourceClaimStatus this + // ResourceHandle is embedded in. It is set at allocation time and is + // intended for processing by the kubelet plugin whose name matches + // the DriverName set in this ResourceHandle. + // + // The maximum size of this field is 16KiB. This may get increased in the + // future, but not reduced. + // +optional + optional string data = 2; +} + diff --git a/vendor/k8s.io/api/resource/v1alpha1/register.go b/vendor/k8s.io/api/resource/v1alpha2/register.go similarity index 95% rename from vendor/k8s.io/api/resource/v1alpha1/register.go rename to vendor/k8s.io/api/resource/v1alpha2/register.go index 8245b9ae..6e0d7ceb 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/register.go +++ b/vendor/k8s.io/api/resource/v1alpha2/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1alpha2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "resource.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -50,8 +50,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ResourceClaimList{}, &ResourceClaimTemplate{}, &ResourceClaimTemplateList{}, - &PodScheduling{}, - &PodSchedulingList{}, + &PodSchedulingContext{}, + &PodSchedulingContextList{}, ) // Add common types diff --git a/vendor/k8s.io/api/resource/v1alpha1/types.go b/vendor/k8s.io/api/resource/v1alpha2/types.go similarity index 81% rename from vendor/k8s.io/api/resource/v1alpha1/types.go rename to vendor/k8s.io/api/resource/v1alpha2/types.go index af570384..21936bfe 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/types.go +++ b/vendor/k8s.io/api/resource/v1alpha2/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/api/core/v1" @@ -99,9 +99,9 @@ type ResourceClaimStatus struct { // +optional DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - // Allocation is set by the resource driver once a resource has been - // allocated successfully. If this is not specified, the resource is - // not yet allocated. + // Allocation is set by the resource driver once a resource or set of + // resources has been allocated successfully. If this is not specified, the + // resources have not been allocated yet. // +optional Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,2,opt,name=allocation"` @@ -133,21 +133,28 @@ type ResourceClaimStatus struct { // claim.status.reservedFor. const ResourceClaimReservedForMaxSize = 32 -// AllocationResult contains attributed of an allocated resource. +// AllocationResult contains attributes of an allocated resource. type AllocationResult struct { - // ResourceHandle contains arbitrary data returned by the driver after a - // successful allocation. This is opaque for - // Kubernetes. Driver documentation may explain to users how to - // interpret this data if needed. + // ResourceHandles contain the state associated with an allocation that + // should be maintained throughout the lifetime of a claim. Each + // ResourceHandle contains data that should be passed to a specific kubelet + // plugin once it lands on a node. This data is returned by the driver + // after a successful allocation and is opaque to Kubernetes. Driver + // documentation may explain to users how to interpret this data if needed. // - // The maximum size of this field is 16KiB. This may get - // increased in the future, but not reduced. + // Setting this field is optional. It has a maximum size of 32 entries. + // If null (or empty), it is assumed this allocation will be processed by a + // single kubelet plugin with no ResourceHandle data attached. The name of + // the kubelet plugin invoked will match the DriverName set in the + // ResourceClaimStatus this AllocationResult is embedded in. 
+ // + // +listType=atomic // +optional - ResourceHandle string `json:"resourceHandle,omitempty" protobuf:"bytes,1,opt,name=resourceHandle"` + ResourceHandles []ResourceHandle `json:"resourceHandles,omitempty" protobuf:"bytes,1,opt,name=resourceHandles"` - // This field will get set by the resource driver after it has - // allocated the resource driver to inform the scheduler where it can - // schedule Pods using the ResourceClaim. + // This field will get set by the resource driver after it has allocated + // the resource to inform the scheduler where it can schedule Pods using + // the ResourceClaim. // // Setting this field is optional. If null, the resource is available // everywhere. @@ -160,8 +167,33 @@ type AllocationResult struct { Shareable bool `json:"shareable,omitempty" protobuf:"varint,3,opt,name=shareable"` } -// ResourceHandleMaxSize is the maximum size of allocation.resourceHandle. -const ResourceHandleMaxSize = 16 * 1024 +// AllocationResultResourceHandlesMaxSize represents the maximum number of +// entries in allocation.resourceHandles. +const AllocationResultResourceHandlesMaxSize = 32 + +// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. +type ResourceHandle struct { + // DriverName specifies the name of the resource driver whose kubelet + // plugin should be invoked to process this ResourceHandle's data once it + // lands on a node. This may differ from the DriverName set in + // ResourceClaimStatus this ResourceHandle is embedded in. + DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` + + // Data contains the opaque data associated with this ResourceHandle. It is + // set by the controller component of the resource driver whose name + // matches the DriverName set in the ResourceClaimStatus this + // ResourceHandle is embedded in. It is set at allocation time and is + // intended for processing by the kubelet plugin whose name matches + // the DriverName set in this ResourceHandle. + // + // The maximum size of this field is 16KiB. This may get increased in the + // future, but not reduced. + // +optional + Data string `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` +} + +// ResourceHandleDataMaxSize represents the maximum size of resourceHandle.data. +const ResourceHandleDataMaxSize = 16 * 1024 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 @@ -181,28 +213,28 @@ type ResourceClaimList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 -// PodScheduling objects hold information that is needed to schedule +// PodSchedulingContext objects hold information that is needed to schedule // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation // mode. // // This is an alpha type and requires enabling the DynamicResourceAllocation // feature gate. -type PodScheduling struct { +type PodSchedulingContext struct { metav1.TypeMeta `json:",inline"` // Standard object metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec describes where resources for the Pod are needed. - Spec PodSchedulingSpec `json:"spec" protobuf:"bytes,2,name=spec"` + Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` // Status describes where resources for the Pod can be allocated. 
// +optional - Status PodSchedulingStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// PodSchedulingSpec describes where resources for the Pod are needed. -type PodSchedulingSpec struct { +// PodSchedulingContextSpec describes where resources for the Pod are needed. +type PodSchedulingContextSpec struct { // SelectedNode is the node for which allocation of ResourceClaims that // are referenced by the Pod and that use "WaitForFirstConsumer" // allocation is to be attempted. @@ -221,8 +253,8 @@ type PodSchedulingSpec struct { PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` } -// PodSchedulingStatus describes where resources for the Pod can be allocated. -type PodSchedulingStatus struct { +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +type PodSchedulingContextStatus struct { // ResourceClaims describes resource availability for each // pod.spec.resourceClaim entry where the corresponding ResourceClaim // uses "WaitForFirstConsumer" allocation mode. @@ -257,22 +289,22 @@ type ResourceClaimSchedulingStatus struct { } // PodSchedulingNodeListMaxSize defines the maximum number of entries in the -// node lists that are stored in PodScheduling objects. This limit is part +// node lists that are stored in PodSchedulingContext objects. This limit is part // of the API. const PodSchedulingNodeListMaxSize = 128 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 -// PodSchedulingList is a collection of Pod scheduling objects. -type PodSchedulingList struct { +// PodSchedulingContextList is a collection of Pod scheduling objects. +type PodSchedulingContextList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of PodScheduling objects. - Items []PodScheduling `json:"items" protobuf:"bytes,2,rep,name=items"` + // Items is the list of PodSchedulingContext objects. + Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient diff --git a/vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go similarity index 76% rename from vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go index 6836dbfb..474be8c8 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more @@ -24,13 +24,13 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
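As the v1alpha2 types above show, AllocationResult now carries a list of ResourceHandle entries (a driverName plus an opaque data payload, at most 32 entries) in place of the old single resourceHandle string. A minimal Go sketch of constructing the new shape follows; the driver name and payload are illustrative placeholders only, not values taken from this patch.

package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
)

func main() {
	// AllocationResult in resource.k8s.io/v1alpha2 holds a slice of
	// ResourceHandle values, each naming the kubelet plugin that should
	// process its opaque Data payload once the claim lands on a node.
	alloc := resourcev1alpha2.AllocationResult{
		ResourceHandles: []resourcev1alpha2.ResourceHandle{
			{
				DriverName: "dra.example.com",     // placeholder driver name
				Data:       `{"device":"dev-0"}`,  // placeholder opaque payload (max 16KiB)
			},
		},
		Shareable: true,
	}
	fmt.Println(alloc.String())
}
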
var map_AllocationResult = map[string]string{ - "": "AllocationResult contains attributed of an allocated resource.", - "resourceHandle": "ResourceHandle contains arbitrary data returned by the driver after a successful allocation. This is opaque for Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.", - "availableOnNodes": "This field will get set by the resource driver after it has allocated the resource driver to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. If null, the resource is available everywhere.", + "": "AllocationResult contains attributes of an allocated resource.", + "resourceHandles": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", + "availableOnNodes": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. 
If null, the resource is available everywhere.", "shareable": "Shareable determines whether the resource supports more than one consumer at a time.", } @@ -38,44 +38,44 @@ func (AllocationResult) SwaggerDoc() map[string]string { return map_AllocationResult } -var map_PodScheduling = map[string]string{ - "": "PodScheduling objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", +var map_PodSchedulingContext = map[string]string{ + "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "metadata": "Standard object metadata", "spec": "Spec describes where resources for the Pod are needed.", "status": "Status describes where resources for the Pod can be allocated.", } -func (PodScheduling) SwaggerDoc() map[string]string { - return map_PodScheduling +func (PodSchedulingContext) SwaggerDoc() map[string]string { + return map_PodSchedulingContext } -var map_PodSchedulingList = map[string]string{ - "": "PodSchedulingList is a collection of Pod scheduling objects.", +var map_PodSchedulingContextList = map[string]string{ + "": "PodSchedulingContextList is a collection of Pod scheduling objects.", "metadata": "Standard list metadata", - "items": "Items is the list of PodScheduling objects.", + "items": "Items is the list of PodSchedulingContext objects.", } -func (PodSchedulingList) SwaggerDoc() map[string]string { - return map_PodSchedulingList +func (PodSchedulingContextList) SwaggerDoc() map[string]string { + return map_PodSchedulingContextList } -var map_PodSchedulingSpec = map[string]string{ - "": "PodSchedulingSpec describes where resources for the Pod are needed.", +var map_PodSchedulingContextSpec = map[string]string{ + "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. 
This may get increased in the future, but not reduced.", } -func (PodSchedulingSpec) SwaggerDoc() map[string]string { - return map_PodSchedulingSpec +func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { + return map_PodSchedulingContextSpec } -var map_PodSchedulingStatus = map[string]string{ - "": "PodSchedulingStatus describes where resources for the Pod can be allocated.", +var map_PodSchedulingContextStatus = map[string]string{ + "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", } -func (PodSchedulingStatus) SwaggerDoc() map[string]string { - return map_PodSchedulingStatus +func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { + return map_PodSchedulingContextStatus } var map_ResourceClaim = map[string]string{ @@ -146,7 +146,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { var map_ResourceClaimStatus = map[string]string{ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", "driverName": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", - "allocation": "Allocation is set by the resource driver once a resource has been allocated successfully. If this is not specified, the resource is not yet allocated.", + "allocation": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet.", "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", "deallocationRequested": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", } @@ -219,4 +219,14 @@ func (ResourceClassParametersReference) SwaggerDoc() map[string]string { return map_ResourceClassParametersReference } +var map_ResourceHandle = map[string]string{ + "": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", + "driverName": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.", + "data": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. 
This may get increased in the future, but not reduced.", +} + +func (ResourceHandle) SwaggerDoc() map[string]string { + return map_ResourceHandle +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go similarity index 88% rename from vendor/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go index c00fbfd1..89d521bf 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/api/core/v1" @@ -29,6 +29,11 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { *out = *in + if in.ResourceHandles != nil { + in, out := &in.ResourceHandles, &out.ResourceHandles + *out = make([]ResourceHandle, len(*in)) + copy(*out, *in) + } if in.AvailableOnNodes != nil { in, out := &in.AvailableOnNodes, &out.AvailableOnNodes *out = new(v1.NodeSelector) @@ -48,7 +53,7 @@ func (in *AllocationResult) DeepCopy() *AllocationResult { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodScheduling) DeepCopyInto(out *PodScheduling) { +func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -57,18 +62,18 @@ func (in *PodScheduling) DeepCopyInto(out *PodScheduling) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScheduling. -func (in *PodScheduling) DeepCopy() *PodScheduling { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. +func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { if in == nil { return nil } - out := new(PodScheduling) + out := new(PodSchedulingContext) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodScheduling) DeepCopyObject() runtime.Object { +func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -76,13 +81,13 @@ func (in *PodScheduling) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) { +func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]PodScheduling, len(*in)) + *out = make([]PodSchedulingContext, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -90,18 +95,18 @@ func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingList. 
-func (in *PodSchedulingList) DeepCopy() *PodSchedulingList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. +func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { if in == nil { return nil } - out := new(PodSchedulingList) + out := new(PodSchedulingContextList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingList) DeepCopyObject() runtime.Object { +func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -109,7 +114,7 @@ func (in *PodSchedulingList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) { +func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { *out = *in if in.PotentialNodes != nil { in, out := &in.PotentialNodes, &out.PotentialNodes @@ -119,18 +124,18 @@ func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingSpec. -func (in *PodSchedulingSpec) DeepCopy() *PodSchedulingSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. +func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { if in == nil { return nil } - out := new(PodSchedulingSpec) + out := new(PodSchedulingContextSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) { +func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { *out = *in if in.ResourceClaims != nil { in, out := &in.ResourceClaims, &out.ResourceClaims @@ -142,12 +147,12 @@ func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingStatus. -func (in *PodSchedulingStatus) DeepCopy() *PodSchedulingStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. +func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { if in == nil { return nil } - out := new(PodSchedulingStatus) + out := new(PodSchedulingContextStatus) in.DeepCopyInto(out) return out } @@ -475,3 +480,19 @@ func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersR in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceHandle) DeepCopyInto(out *ResourceHandle) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHandle. 
+func (in *ResourceHandle) DeepCopy() *ResourceHandle { + if in == nil { + return nil + } + out := new(ResourceHandle) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index afc09077..c1a27e8b 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -37,7 +37,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -54,7 +54,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go index 0f298942..146bae40 100644 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -34,7 +34,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -51,7 +51,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go index ac34c531..f167e197 100644 --- a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. 
This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 5c60b7ab..f0878fb1 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -38,7 +38,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,7 +55,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 7b0df486..26ba8ff5 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -35,7 +35,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -52,7 +52,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. 
// +optional diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index fa25f969..557005db 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 44b49ea2..43878184 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -38,7 +38,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,7 +55,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. 
// +optional diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index e315e1b3..6f88592c 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -39,7 +39,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -56,7 +56,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index cbc140f4..f42008eb 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index d3c425c0..5f8eccae 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -46,7 +46,7 @@ message CSIDriver { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -79,16 +79,15 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name @@ -110,29 +109,27 @@ message CSIDriverSpec { optional bool podInfoOnMount = 2; // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. 
// // +optional // +listType=set repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -149,7 +146,7 @@ message CSIDriverSpec { // +featureGate=CSIStorageCapacity optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -159,10 +156,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -182,7 +180,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -193,7 +191,7 @@ message CSIDriverSpec { // +optional optional bool requiresRepublish = 7; - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -211,6 +209,7 @@ message CSIDriverSpec { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional optional bool seLinuxMount = 8; } @@ -225,6 +224,7 @@ message CSIDriverSpec { // enough that it doesn't create this object. // CSINode has an OwnerReference that points to the corresponding node object. message CSINode { + // Standard object's metadata. // metadata.name must be the Kubernetes node name. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -234,7 +234,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -314,11 +314,11 @@ message CSINodeSpec { // the scheduler assumes that capacity is insufficient and tries some other // node. message CSIStorageCapacity { - // Standard object's metadata. 
The name has no particular meaning. It must be - // be a DNS subdomain (dots allowed, 253 characters). To ensure that - // there are no conflicts with other CSI drivers on the cluster, the recommendation - // is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends - // with the unique CSI driver name. + // Standard object's metadata. + // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. // // Objects are namespaced. // @@ -326,7 +326,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -335,7 +335,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -343,7 +343,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -355,7 +355,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -377,7 +377,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -394,36 +394,36 @@ message StorageClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional map<string, string> parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete.
// +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -439,17 +439,17 @@ message StorageClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -466,11 +466,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -484,7 +484,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -493,7 +493,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. 
// +optional optional string persistentVolumeName = 1; @@ -509,39 +509,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map<string, string> attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -550,11 +550,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -563,7 +563,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded.
diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index f57099df..c785f368 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -33,41 +33,42 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -81,12 +82,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. 
type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -122,11 +124,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -138,25 +140,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -165,7 +168,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -181,26 +184,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. 
Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -209,11 +212,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -242,7 +245,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -279,16 +282,15 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name @@ -310,29 +312,27 @@ type CSIDriverSpec struct { PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. // // +optional // +listType=set VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -349,7 +349,7 @@ type CSIDriverSpec struct { // +featureGate=CSIStorageCapacity StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -359,10 +359,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
// The CSI driver should parse and validate the following VolumeContext: @@ -382,7 +383,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -393,7 +394,7 @@ type CSIDriverSpec struct { // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -411,6 +412,7 @@ type CSIDriverSpec struct { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"` } @@ -453,12 +455,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -502,6 +503,7 @@ const ( type CSINode struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // metadata.name must be the Kubernetes node name. metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -520,7 +522,7 @@ type CSINodeSpec struct { // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -557,7 +559,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. @@ -609,11 +611,12 @@ type CSINodeList struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` - // Standard object's metadata. The name has no particular meaning. It must be - // be a DNS subdomain (dots allowed, 253 characters). 
To ensure that - // there are no conflicts with other CSI drivers on the cluster, the recommendation - // is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends - // with the unique CSI driver name. + + // Standard object's metadata. + // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. // // Objects are namespaced. // @@ -621,7 +624,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -630,7 +633,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -638,7 +641,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -650,7 +653,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -670,12 +673,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 1a069bb4..c92a7f95 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,13 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. 
New mount points will not be seen by a running container.", - "seLinuxMount": "SELinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. 
This field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -65,7 +65,7 @@ func (CSIDriverSpec) SwaggerDoc() map[string]string { var map_CSINode = map[string]string{ "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. 
As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", - "metadata": "metadata.name must be the Kubernetes node name.", + "metadata": "Standard object's metadata. metadata.name must be the Kubernetes node name.", "spec": "spec is the specification of CSINode", } @@ -75,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.", @@ -106,11 +106,11 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. 
This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", - "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "metadata": "Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). 
To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -120,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -130,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. 
Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand.", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -146,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -155,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". 
It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -166,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -177,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -186,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -195,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -206,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -218,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -228,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index a5345122..88250a0f 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -67,7 +67,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -76,7 +76,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -84,7 +84,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -96,7 +96,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -118,7 +118,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -134,11 +134,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -152,7 +152,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -161,7 +161,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. 
+ // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -177,39 +177,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -218,11 +218,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index fe8c9e3c..59ef348a 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -41,11 +41,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. 
+ // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -60,25 +60,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -87,7 +88,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -103,26 +104,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. 
+ // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -131,11 +132,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional @@ -174,6 +175,7 @@ type VolumeError struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -186,7 +188,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -195,7 +197,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -203,7 +205,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -215,7 +217,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -238,12 +240,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. 
+ // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index a228a3fe..ba6afbd5 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. 
This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -43,7 +43,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -53,8 +53,8 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. 
Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -64,7 +64,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -73,7 +73,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -93,10 +93,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. 
the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -105,8 +105,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index bedbd318..2b354dd4 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -49,7 +49,7 @@ message CSIDriver { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -82,16 +82,15 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name @@ -112,14 +111,14 @@ message CSIDriverSpec { // +optional optional bool podInfoOnMount = 2; - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -130,10 +129,9 @@ message CSIDriverSpec { // +optional repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -149,7 +147,7 @@ message CSIDriverSpec { // +optional optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -159,10 +157,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -182,7 +181,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. 
// @@ -193,7 +192,7 @@ message CSIDriverSpec { // +optional optional bool requiresRepublish = 7; - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -211,6 +210,7 @@ message CSIDriverSpec { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional optional bool seLinuxMount = 8; } @@ -236,7 +236,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -327,7 +327,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -336,7 +336,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -344,7 +344,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -356,7 +356,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -378,7 +378,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -395,36 +395,36 @@ message StorageClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. 
// +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -440,17 +440,17 @@ message StorageClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -467,11 +467,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -485,7 +485,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -494,7 +494,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. 
message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -510,39 +510,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -551,11 +551,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -564,7 +564,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. 
// If this field is nil, then the supported number of volumes on this node is unbounded. diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index f4d09b64..4c39b49c 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -36,41 +36,42 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -87,12 +88,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. 
type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -130,11 +132,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -149,25 +151,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -176,7 +179,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -192,26 +195,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. 
Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -220,11 +223,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -259,7 +262,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -299,16 +302,15 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name @@ -329,14 +331,14 @@ type CSIDriverSpec struct { // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -347,11 +349,9 @@ type CSIDriverSpec struct { // +optional VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. - // + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -367,7 +367,7 @@ type CSIDriverSpec struct { // +optional StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -377,10 +377,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
// The CSI driver should parse and validate the following VolumeContext: @@ -400,7 +401,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -411,7 +412,7 @@ type CSIDriverSpec struct { // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -429,6 +430,7 @@ type CSIDriverSpec struct { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"` } @@ -466,12 +468,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -539,7 +540,7 @@ type CSINodeSpec struct { // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -575,7 +576,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is nil, then the supported number of volumes on this node is unbounded. @@ -634,6 +635,7 @@ type CSINodeList struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). 
To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -646,7 +648,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -655,7 +657,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -663,7 +665,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -675,7 +677,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -698,12 +700,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index ea3c1e4c..0f2718b9 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. 
Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,13 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. 
The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", - "seLinuxMount": "SELinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. 
In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. 
Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -75,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". 
This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", @@ -107,10 +107,10 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. 
This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -120,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -130,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -146,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -155,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". 
It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -166,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -177,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -186,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -195,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -206,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -218,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -228,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is nil, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index f402c416..04ce206b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -196,6 +196,18 @@ type ValidationRule struct { // If unset, the message is "failed rule: {Rule}". // e.g. "must be a URL with the host matching spec.host" Message string + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + MessageExpression string } // JSON represents any valid JSON value. 
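The MessageExpression field added to ValidationRule in the hunk above holds a CEL expression that must evaluate to a string; if it errors at runtime or yields an empty, whitespace-only, or multi-line string, the plain Message is used instead. A minimal sketch of populating it, assuming the internal apiextensions types vendored here; the rule and the field names inside the expressions are illustrative, not taken from this patch:

package example

import (
	apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
)

// exampleRule sketches a validation rule with a dynamic failure message.
// self.minReplicas / self.replicas are hypothetical CRD properties, used
// only to show the shape of the new field.
func exampleRule() apiextensions.ValidationRule {
	return apiextensions.ValidationRule{
		Rule:    "self.minReplicas <= self.replicas",
		Message: "replicas must be at least minReplicas",
		// Evaluated as CEL and must return a string; on runtime error or an
		// empty result, the static Message above is reported instead.
		MessageExpression: `"replicas must be at least " + string(self.minReplicas)`,
	}
}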
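The storage/v1beta1 hunks earlier in this patch only lowercase and reword field doc comments; the fields themselves are unchanged. A minimal sketch of a StorageClass exercising those fields, assuming the vendored k8s.io/api/storage/v1beta1 types; the provisioner and parameter values are placeholders, not anything prescribed by the patch:

package example

import (
	corev1 "k8s.io/api/core/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleStorageClass builds a StorageClass using the documented fields.
func exampleStorageClass() *storagev1beta1.StorageClass {
	reclaim := corev1.PersistentVolumeReclaimDelete // reclaimPolicy defaults to Delete when unset
	binding := storagev1beta1.VolumeBindingWaitForFirstConsumer
	expand := true

	return &storagev1beta1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "example-fast"},
		Provisioner: "csi.example.com", // placeholder driver name
		Parameters:  map[string]string{"type": "fast"},
		ReclaimPolicy: &reclaim,
		// mountOptions are not validated; mounting a PV fails if one is invalid.
		MountOptions:         []string{"ro", "soft"},
		AllowVolumeExpansion: &expand,
		// When unset, VolumeBindingImmediate is used.
		VolumeBindingMode: &binding,
	}
}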
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go index 68eb0808..5dbb38c8 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go @@ -814,198 +814,199 @@ func init() { } var fileDescriptor_f5a35c9667703937 = []byte{ - // 3047 bytes of a gzipped FileDescriptorProto + // 3072 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdf, 0x6f, 0x24, 0x47, 0xf1, 0xbf, 0x59, 0xff, 0x5a, 0xb7, 0xed, 0xb3, 0xdd, 0x77, 0xf6, 0x77, 0xce, 0xb9, 0xf3, 0xfa, 0x36, 0xdf, 0x1c, 0x4e, 0x72, 0x59, 0x27, 0x26, 0x21, 0x47, 0x84, 0x40, 0x5e, 0xdb, 0x97, 0x38, - 0x67, 0x9f, 0xad, 0xde, 0xbb, 0x8b, 0x93, 0x00, 0xc9, 0x78, 0xa7, 0xbd, 0x9e, 0x78, 0x7e, 0x5d, - 0xf7, 0xcc, 0xda, 0x96, 0x40, 0x8a, 0x40, 0x11, 0x10, 0x09, 0xc2, 0x03, 0x0a, 0x4f, 0x08, 0x21, - 0x94, 0x07, 0x78, 0x80, 0x37, 0xf8, 0x17, 0xf2, 0x82, 0x94, 0x27, 0x14, 0x09, 0x69, 0x45, 0x96, - 0x7f, 0x00, 0x09, 0x10, 0xc2, 0x0f, 0x08, 0xf5, 0x8f, 0xe9, 0xe9, 0x9d, 0xdd, 0xbd, 0x3b, 0xd9, - 0xeb, 0xe4, 0xcd, 0xae, 0xaa, 0xae, 0x4f, 0x75, 0x75, 0x75, 0x55, 0x75, 0xcd, 0x02, 0x6b, 0xff, - 0x06, 0x2d, 0x39, 0xc1, 0xc2, 0x7e, 0xbc, 0x83, 0x89, 0x8f, 0x23, 0x4c, 0x17, 0xea, 0xd8, 0xb7, - 0x03, 0xb2, 0x20, 0x19, 0x56, 0xe8, 0xe0, 0xc3, 0x08, 0xfb, 0xd4, 0x09, 0x7c, 0xfa, 0x8c, 0x15, - 0x3a, 0x14, 0x93, 0x3a, 0x26, 0x0b, 0xe1, 0x7e, 0x8d, 0xf1, 0x68, 0xab, 0xc0, 0x42, 0xfd, 0xb9, - 0x85, 0x1a, 0xf6, 0x31, 0xb1, 0x22, 0x6c, 0x97, 0x42, 0x12, 0x44, 0x01, 0xbc, 0x21, 0x34, 0x95, - 0x5a, 0x04, 0xdf, 0x52, 0x9a, 0x4a, 0xe1, 0x7e, 0x8d, 0xf1, 0x68, 0xab, 0x40, 0xa9, 0xfe, 0xdc, - 0xcc, 0x33, 0x35, 0x27, 0xda, 0x8b, 0x77, 0x4a, 0xd5, 0xc0, 0x5b, 0xa8, 0x05, 0xb5, 0x60, 0x81, - 0x2b, 0xdc, 0x89, 0x77, 0xf9, 0x7f, 0xfc, 0x1f, 0xfe, 0x97, 0x00, 0x9a, 0x79, 0x3e, 0x35, 0xd9, + 0x67, 0x9f, 0xad, 0xde, 0xbb, 0x8b, 0x93, 0x20, 0x92, 0xf1, 0x4e, 0x7b, 0x3d, 0xf1, 0xfc, 0xba, + 0xee, 0x99, 0xb5, 0x2d, 0x81, 0x14, 0x81, 0x22, 0x20, 0x12, 0x84, 0x07, 0x14, 0x9e, 0x10, 0x42, + 0x28, 0x48, 0xf0, 0x00, 0x6f, 0xf0, 0x2f, 0xe4, 0x05, 0x29, 0x4f, 0x28, 0x12, 0xd2, 0x8a, 0x2c, + 0xff, 0x00, 0x12, 0x20, 0x84, 0x1f, 0x10, 0xea, 0x1f, 0xd3, 0xd3, 0x3b, 0xbb, 0x7b, 0x77, 0xb2, + 0xd7, 0xc9, 0xdb, 0x6e, 0x55, 0x75, 0x7d, 0xaa, 0xab, 0xab, 0xab, 0xab, 0xab, 0x07, 0x58, 0xfb, + 0x37, 0x68, 0xc9, 0x09, 0x16, 0xf6, 0xe3, 0x1d, 0x4c, 0x7c, 0x1c, 0x61, 0xba, 0x50, 0xc7, 0xbe, + 0x1d, 0x90, 0x05, 0xc9, 0xb0, 0x42, 0x07, 0x1f, 0x46, 0xd8, 0xa7, 0x4e, 0xe0, 0xd3, 0x67, 0xac, + 0xd0, 0xa1, 0x98, 0xd4, 0x31, 0x59, 0x08, 0xf7, 0x6b, 0x8c, 0x47, 0x5b, 0x05, 0x16, 0xea, 0xcf, + 0x2d, 0xd4, 0xb0, 0x8f, 0x89, 0x15, 0x61, 0xbb, 0x14, 0x92, 0x20, 0x0a, 0xe0, 0x0d, 0xa1, 0xa9, + 0xd4, 0x22, 0xf8, 0x96, 0xd2, 0x54, 0x0a, 0xf7, 0x6b, 0x8c, 0x47, 0x5b, 0x05, 0x4a, 0xf5, 0xe7, + 0x66, 0x9e, 0xa9, 0x39, 0xd1, 0x5e, 0xbc, 0x53, 0xaa, 0x06, 0xde, 0x42, 0x2d, 0xa8, 0x05, 0x0b, + 0x5c, 0xe1, 0x4e, 0xbc, 0xcb, 0xff, 0xf1, 0x3f, 0xfc, 0x97, 0x00, 0x9a, 0x79, 0x3e, 0x35, 0xd9, 0xb3, 0xaa, 0x7b, 0x8e, 0x8f, 0xc9, 0x51, 0x6a, 0xa7, 0x87, 0x23, 0xab, 0x83, 0x79, 0x33, 0x0b, - 0xdd, 0x56, 0x91, 0xd8, 0x8f, 0x1c, 0x0f, 0xb7, 0x2d, 0xf8, 0xca, 0xc3, 0x16, 0xd0, 0xea, 0x1e, - 0xf6, 0xac, 0xec, 0xba, 0xe2, 0xb1, 0x01, 0x26, 0x97, 0x03, 0xbf, 0x8e, 0x09, 0xdb, 0x20, 0xc2, + 0xdd, 0x46, 0x91, 0xd8, 0x8f, 0x1c, 0x0f, 0xb7, 0x0d, 
0xf8, 0xca, 0xc3, 0x06, 0xd0, 0xea, 0x1e, + 0xf6, 0xac, 0xec, 0xb8, 0xe2, 0xb1, 0x01, 0x26, 0x97, 0x03, 0xbf, 0x8e, 0x09, 0x9b, 0x20, 0xc2, 0xf7, 0x63, 0x4c, 0x23, 0x58, 0x06, 0x7d, 0xb1, 0x63, 0x9b, 0xc6, 0x9c, 0x31, 0x3f, 0x5c, 0x7e, 0xf6, 0xe3, 0x46, 0xe1, 0x5c, 0xb3, 0x51, 0xe8, 0xbb, 0xbb, 0xb6, 0x72, 0xdc, 0x28, 0x5c, 0xed, - 0x86, 0x14, 0x1d, 0x85, 0x98, 0x96, 0xee, 0xae, 0xad, 0x20, 0xb6, 0x18, 0xbe, 0x0c, 0x26, 0x6d, + 0x86, 0x14, 0x1d, 0x85, 0x98, 0x96, 0xee, 0xae, 0xad, 0x20, 0x36, 0x18, 0xbe, 0x0c, 0x26, 0x6d, 0x4c, 0x1d, 0x82, 0xed, 0xa5, 0xad, 0xb5, 0x7b, 0x42, 0xbf, 0x99, 0xe3, 0x1a, 0x2f, 0x49, 0x8d, - 0x93, 0x2b, 0x59, 0x01, 0xd4, 0xbe, 0x06, 0x6e, 0x83, 0xa1, 0x60, 0xe7, 0x1d, 0x5c, 0x8d, 0xa8, - 0xd9, 0x37, 0xd7, 0x37, 0x3f, 0xb2, 0xf8, 0x4c, 0x29, 0x3d, 0x3c, 0x65, 0x02, 0x3f, 0x31, 0xb9, - 0xd9, 0x12, 0xb2, 0x0e, 0x56, 0x93, 0x43, 0x2b, 0x8f, 0x4b, 0xb4, 0xa1, 0x4d, 0xa1, 0x05, 0x25, - 0xea, 0x8a, 0xbf, 0xce, 0x01, 0xa8, 0x6f, 0x9e, 0x86, 0x81, 0x4f, 0x71, 0x4f, 0x76, 0x4f, 0xc1, + 0x93, 0x2b, 0x59, 0x01, 0xd4, 0x3e, 0x06, 0x6e, 0x83, 0xa1, 0x60, 0xe7, 0x1d, 0x5c, 0x8d, 0xa8, + 0xd9, 0x37, 0xd7, 0x37, 0x3f, 0xb2, 0xf8, 0x4c, 0x29, 0x5d, 0x3c, 0x65, 0x02, 0x5f, 0x31, 0x39, + 0xd9, 0x12, 0xb2, 0x0e, 0x56, 0x93, 0x45, 0x2b, 0x8f, 0x4b, 0xb4, 0xa1, 0x4d, 0xa1, 0x05, 0x25, + 0xea, 0x8a, 0xbf, 0xca, 0x01, 0xa8, 0x4f, 0x9e, 0x86, 0x81, 0x4f, 0x71, 0x4f, 0x66, 0x4f, 0xc1, 0x44, 0x95, 0x6b, 0x8e, 0xb0, 0x2d, 0x71, 0xcd, 0xdc, 0x49, 0xac, 0x37, 0x25, 0xfe, 0xc4, 0x72, 0x46, 0x1d, 0x6a, 0x03, 0x80, 0x77, 0xc0, 0x20, 0xc1, 0x34, 0x76, 0x23, 0xb3, 0x6f, 0xce, 0x98, 0x1f, 0x59, 0xbc, 0xde, 0x15, 0x8a, 0x87, 0x36, 0x0b, 0xbe, 0x52, 0xfd, 0xb9, 0x52, 0x25, 0xb2, 0xa2, 0x98, 0x96, 0xcf, 0x4b, 0xa4, 0x41, 0xc4, 0x75, 0x20, 0xa9, 0xab, 0xf8, 0x5f, 0x03, 0x4c, 0xe8, 0x5e, 0xaa, 0x3b, 0xf8, 0x00, 0x12, 0x30, 0x44, 0x44, 0xb0, 0x70, 0x3f, 0x8d, 0x2c, 0xde, - 0x2a, 0x9d, 0xf4, 0x46, 0x95, 0xda, 0xe2, 0xaf, 0x3c, 0xc2, 0x8e, 0x4b, 0xfe, 0x83, 0x12, 0x20, - 0x58, 0x07, 0x79, 0x22, 0xcf, 0x88, 0x07, 0xd2, 0xc8, 0xe2, 0x7a, 0x6f, 0x40, 0x85, 0xce, 0xf2, - 0x68, 0xb3, 0x51, 0xc8, 0x27, 0xff, 0x21, 0x85, 0x55, 0xfc, 0x65, 0x0e, 0xcc, 0x2e, 0xc7, 0x34, - 0x0a, 0x3c, 0x84, 0x69, 0x10, 0x93, 0x2a, 0x5e, 0x0e, 0xdc, 0xd8, 0xf3, 0x57, 0xf0, 0xae, 0xe3, - 0x3b, 0x11, 0x8b, 0xd1, 0x39, 0xd0, 0xef, 0x5b, 0x1e, 0x96, 0x31, 0x33, 0x2a, 0x3d, 0xd9, 0x7f, - 0xdb, 0xf2, 0x30, 0xe2, 0x1c, 0x26, 0xc1, 0x42, 0x44, 0xde, 0x00, 0x25, 0x71, 0xe7, 0x28, 0xc4, - 0x88, 0x73, 0xe0, 0x35, 0x30, 0xb8, 0x1b, 0x10, 0xcf, 0x12, 0xa7, 0x37, 0x9c, 0x9e, 0xc7, 0x4d, - 0x4e, 0x45, 0x92, 0x0b, 0x5f, 0x00, 0x23, 0x36, 0xa6, 0x55, 0xe2, 0x84, 0x0c, 0xda, 0xec, 0xe7, - 0xc2, 0x17, 0xa4, 0xf0, 0xc8, 0x4a, 0xca, 0x42, 0xba, 0x1c, 0xbc, 0x0e, 0xf2, 0x21, 0x71, 0x02, - 0xe2, 0x44, 0x47, 0xe6, 0xc0, 0x9c, 0x31, 0x3f, 0x50, 0x9e, 0x90, 0x6b, 0xf2, 0x5b, 0x92, 0x8e, - 0x94, 0x04, 0x93, 0x7e, 0x87, 0x06, 0xfe, 0x96, 0x15, 0xed, 0x99, 0x83, 0x1c, 0x41, 0x49, 0xbf, - 0x5a, 0xd9, 0xbc, 0xcd, 0xe8, 0x48, 0x49, 0x14, 0xff, 0x6c, 0x00, 0x33, 0xeb, 0xa1, 0xc4, 0xbd, - 0xf0, 0x26, 0xc8, 0xd3, 0x88, 0xe5, 0x9c, 0xda, 0x91, 0xf4, 0xcf, 0x53, 0x89, 0xaa, 0x8a, 0xa4, - 0x1f, 0x37, 0x0a, 0xd3, 0xe9, 0x8a, 0x84, 0xca, 0x7d, 0xa3, 0xd6, 0xb2, 0x90, 0x3b, 0xc0, 0x3b, - 0x7b, 0x41, 0xb0, 0x2f, 0x4f, 0xff, 0x14, 0x21, 0xf7, 0x9a, 0x50, 0x94, 0x62, 0x8a, 0x90, 0x93, - 0x64, 0x94, 0x00, 0x15, 0xff, 0x93, 0xcb, 0x6e, 0x4c, 0x3b, 0xf4, 0xb7, 0x41, 0x9e, 0x5d, 0x21, - 0xdb, 0x8a, 0x2c, 0x79, 0x09, 0x9e, 0x7d, 0xb4, 0x0b, 0x27, 0xee, 0xeb, 0x06, 0x8e, 0xac, 0x32, 
- 0x94, 0xae, 0x00, 0x29, 0x0d, 0x29, 0xad, 0xf0, 0x10, 0xf4, 0xd3, 0x10, 0x57, 0xe5, 0x7e, 0xef, - 0x9d, 0x22, 0xda, 0xbb, 0xec, 0xa1, 0x12, 0xe2, 0x6a, 0x1a, 0x8c, 0xec, 0x3f, 0xc4, 0x11, 0xe1, - 0xbb, 0x06, 0x18, 0xa4, 0x3c, 0x2f, 0xc8, 0x5c, 0xb2, 0x7d, 0x06, 0xe0, 0x99, 0xbc, 0x23, 0xfe, - 0x47, 0x12, 0xb7, 0xf8, 0xcf, 0x1c, 0xb8, 0xda, 0x6d, 0xe9, 0x72, 0xe0, 0xdb, 0xe2, 0x10, 0xd6, - 0xe4, 0xbd, 0x12, 0x91, 0xf5, 0x82, 0x7e, 0xaf, 0x8e, 0x1b, 0x85, 0x27, 0x1e, 0xaa, 0x40, 0xbb, - 0x80, 0x5f, 0x55, 0x5b, 0x16, 0x97, 0xf4, 0x6a, 0xab, 0x61, 0xc7, 0x8d, 0xc2, 0xb8, 0x5a, 0xd6, - 0x6a, 0x2b, 0xac, 0x03, 0xe8, 0x5a, 0x34, 0xba, 0x43, 0x2c, 0x9f, 0x0a, 0xb5, 0x8e, 0x87, 0xa5, - 0xe7, 0x9e, 0x7a, 0xb4, 0xa0, 0x60, 0x2b, 0xca, 0x33, 0x12, 0x12, 0xae, 0xb7, 0x69, 0x43, 0x1d, - 0x10, 0x58, 0xce, 0x20, 0xd8, 0xa2, 0x2a, 0x0d, 0x68, 0x39, 0x9c, 0x51, 0x91, 0xe4, 0xc2, 0x27, - 0xc1, 0x90, 0x87, 0x29, 0xb5, 0x6a, 0x98, 0xdf, 0xfd, 0xe1, 0xb4, 0x28, 0x6e, 0x08, 0x32, 0x4a, - 0xf8, 0xc5, 0x7f, 0x19, 0xe0, 0x72, 0x37, 0xaf, 0xad, 0x3b, 0x34, 0x82, 0xdf, 0x6c, 0x0b, 0xfb, - 0xd2, 0xa3, 0xed, 0x90, 0xad, 0xe6, 0x41, 0xaf, 0x52, 0x49, 0x42, 0xd1, 0x42, 0xfe, 0x00, 0x0c, - 0x38, 0x11, 0xf6, 0x92, 0x6a, 0x89, 0x7a, 0x1f, 0x76, 0xe5, 0x31, 0x09, 0x3f, 0xb0, 0xc6, 0x80, - 0x90, 0xc0, 0x2b, 0x7e, 0x94, 0x03, 0x57, 0xba, 0x2d, 0x61, 0x79, 0x9c, 0x32, 0x67, 0x87, 0x6e, - 0x4c, 0x2c, 0x57, 0x06, 0x9b, 0x72, 0xf6, 0x16, 0xa7, 0x22, 0xc9, 0x65, 0xb9, 0x93, 0x3a, 0x7e, - 0x2d, 0x76, 0x2d, 0x22, 0x23, 0x49, 0x6d, 0xb8, 0x22, 0xe9, 0x48, 0x49, 0xc0, 0x12, 0x00, 0x74, - 0x2f, 0x20, 0x11, 0xc7, 0xe0, 0x1d, 0xce, 0x70, 0xf9, 0x3c, 0xcb, 0x08, 0x15, 0x45, 0x45, 0x9a, - 0x04, 0x2b, 0x24, 0xfb, 0x8e, 0x6f, 0xcb, 0x03, 0x57, 0x77, 0xf7, 0x96, 0xe3, 0xdb, 0x88, 0x73, - 0x18, 0xbe, 0xeb, 0xd0, 0x88, 0x51, 0xe4, 0x69, 0xb7, 0x38, 0x9c, 0x4b, 0x2a, 0x09, 0x86, 0x5f, - 0x65, 0x09, 0x36, 0x20, 0x0e, 0xa6, 0xe6, 0x60, 0x8a, 0xbf, 0xac, 0xa8, 0x48, 0x93, 0x28, 0xfe, - 0xa5, 0xbf, 0x7b, 0x7c, 0xb0, 0x04, 0x02, 0x1f, 0x07, 0x03, 0x35, 0x12, 0xc4, 0xa1, 0xf4, 0x92, - 0xf2, 0xf6, 0xcb, 0x8c, 0x88, 0x04, 0x0f, 0x7e, 0x07, 0x0c, 0xf8, 0x72, 0xc3, 0x2c, 0x82, 0x5e, - 0xeb, 0xfd, 0x31, 0x73, 0x6f, 0xa5, 0xe8, 0xc2, 0x91, 0x02, 0x14, 0x3e, 0x0f, 0x06, 0x68, 0x35, - 0x08, 0xb1, 0x74, 0xe2, 0x6c, 0x22, 0x54, 0x61, 0xc4, 0xe3, 0x46, 0x61, 0x2c, 0x51, 0xc7, 0x09, - 0x48, 0x08, 0xc3, 0x1f, 0x18, 0x20, 0x2f, 0xcb, 0x05, 0x35, 0x87, 0x78, 0x78, 0xbe, 0xde, 0x7b, - 0xbb, 0x65, 0xdb, 0x9b, 0x9e, 0x99, 0x24, 0x50, 0xa4, 0xc0, 0xe1, 0xf7, 0x0c, 0x00, 0xaa, 0xaa, - 0x76, 0x99, 0xc3, 0xdc, 0x87, 0x3d, 0xbb, 0x2a, 0x5a, 0x55, 0x14, 0x81, 0x90, 0xb6, 0x4a, 0x1a, - 0x2a, 0xac, 0x80, 0xa9, 0x90, 0x60, 0xae, 0xfb, 0xae, 0xbf, 0xef, 0x07, 0x07, 0xfe, 0x4d, 0x07, - 0xbb, 0x36, 0x35, 0xc1, 0x9c, 0x31, 0x9f, 0x2f, 0x5f, 0x91, 0xf6, 0x4f, 0x6d, 0x75, 0x12, 0x42, - 0x9d, 0xd7, 0x16, 0xdf, 0xeb, 0xcb, 0xf6, 0x5a, 0xd9, 0x7a, 0x01, 0x3f, 0x10, 0x9b, 0x17, 0x79, - 0x98, 0x9a, 0x06, 0x3f, 0x88, 0x37, 0x7b, 0x7f, 0x10, 0x2a, 0xd7, 0xa7, 0x45, 0x5a, 0x91, 0x28, - 0xd2, 0x4c, 0x80, 0x3f, 0x33, 0xc0, 0x98, 0x55, 0xad, 0xe2, 0x30, 0xc2, 0xb6, 0xb8, 0xc6, 0xb9, - 0xb3, 0x8d, 0xea, 0x29, 0x69, 0xd0, 0xd8, 0x92, 0x8e, 0x8a, 0x5a, 0x8d, 0x80, 0x2f, 0x81, 0xf3, - 0x34, 0x0a, 0x08, 0xb6, 0x93, 0x08, 0x92, 0xd9, 0x05, 0x36, 0x1b, 0x85, 0xf3, 0x95, 0x16, 0x0e, - 0xca, 0x48, 0x16, 0x3f, 0x19, 0x00, 0x85, 0x87, 0x44, 0xe8, 0x23, 0x34, 0xbd, 0xd7, 0xc0, 0x20, - 0xdf, 0xa9, 0xcd, 0x1d, 0x92, 0xd7, 0x4a, 0x3d, 0xa7, 0x22, 0xc9, 0x65, 0xe5, 0x89, 0xe1, 0xb3, - 0xf2, 0xd4, 0xc7, 0x05, 
0x55, 0x79, 0xaa, 0x08, 0x32, 0x4a, 0xf8, 0x70, 0x11, 0x00, 0x1b, 0x87, - 0x04, 0xb3, 0x8c, 0x64, 0x9b, 0x43, 0x5c, 0x5a, 0x9d, 0xcf, 0x8a, 0xe2, 0x20, 0x4d, 0x0a, 0xde, - 0x04, 0x30, 0xf9, 0xcf, 0x09, 0xfc, 0xd7, 0x2c, 0xe2, 0x3b, 0x7e, 0xcd, 0xcc, 0x73, 0xb3, 0xa7, - 0x59, 0xb5, 0x5d, 0x69, 0xe3, 0xa2, 0x0e, 0x2b, 0x60, 0x1d, 0x0c, 0x8a, 0x67, 0x34, 0xcf, 0x1b, - 0x3d, 0xbc, 0x71, 0xf7, 0x2c, 0xd7, 0xb1, 0x39, 0x54, 0x19, 0x70, 0xf7, 0x70, 0x14, 0x24, 0xd1, - 0xe0, 0xfb, 0x06, 0x18, 0xa5, 0xf1, 0x0e, 0x91, 0xd2, 0x94, 0x67, 0xf5, 0x91, 0xc5, 0x3b, 0xbd, - 0x82, 0xaf, 0x68, 0xba, 0xcb, 0x13, 0xcd, 0x46, 0x61, 0x54, 0xa7, 0xa0, 0x16, 0x6c, 0xf8, 0x07, - 0x03, 0x98, 0x96, 0x2d, 0x42, 0xdf, 0x72, 0xb7, 0x88, 0xe3, 0x47, 0x98, 0x88, 0x07, 0x91, 0x28, - 0x1f, 0x3d, 0xec, 0x15, 0xb3, 0xef, 0xac, 0xf2, 0x9c, 0x3c, 0x69, 0x73, 0xa9, 0x8b, 0x05, 0xa8, - 0xab, 0x6d, 0xc5, 0x7f, 0x1b, 0xd9, 0xd4, 0xa2, 0xed, 0xb2, 0x52, 0xb5, 0x5c, 0x0c, 0x57, 0xc0, - 0x04, 0xeb, 0x7e, 0x11, 0x0e, 0x5d, 0xa7, 0x6a, 0x51, 0xfe, 0xfa, 0x11, 0xd1, 0xad, 0x9e, 0xe1, - 0x95, 0x0c, 0x1f, 0xb5, 0xad, 0x80, 0xaf, 0x02, 0x28, 0xda, 0xc2, 0x16, 0x3d, 0xa2, 0x13, 0x50, - 0x0d, 0x5e, 0xa5, 0x4d, 0x02, 0x75, 0x58, 0x05, 0x97, 0xc1, 0xa4, 0x6b, 0xed, 0x60, 0xb7, 0x82, - 0x5d, 0x5c, 0x8d, 0x02, 0xc2, 0x55, 0x89, 0xf7, 0xe1, 0x54, 0xb3, 0x51, 0x98, 0x5c, 0xcf, 0x32, - 0x51, 0xbb, 0x7c, 0xf1, 0x6a, 0xf6, 0x2e, 0xeb, 0x1b, 0x17, 0xcd, 0xf6, 0x87, 0x39, 0x30, 0xd3, - 0x3d, 0x28, 0xe0, 0x77, 0x55, 0x6b, 0x2c, 0x3a, 0xbe, 0xd7, 0xcf, 0x20, 0xf4, 0xe4, 0x73, 0x00, - 0xb4, 0x3f, 0x05, 0xe0, 0x11, 0xab, 0xd7, 0x96, 0x9b, 0x3c, 0xfb, 0xb7, 0xcf, 0x02, 0x9d, 0xe9, - 0x2f, 0x0f, 0x8b, 0x2e, 0xc0, 0x72, 0x79, 0xd1, 0xb7, 0x5c, 0x5c, 0xfc, 0xa8, 0xed, 0x69, 0x9b, - 0x5e, 0x56, 0xf8, 0x43, 0x03, 0x8c, 0x07, 0x21, 0xf6, 0x97, 0xb6, 0xd6, 0xee, 0x7d, 0x59, 0x5c, - 0x5a, 0xe9, 0xa0, 0xb5, 0x93, 0x9b, 0xc8, 0xde, 0xd7, 0x42, 0xd7, 0x16, 0x09, 0x42, 0x5a, 0xbe, - 0xd0, 0x6c, 0x14, 0xc6, 0x37, 0x5b, 0x51, 0x50, 0x16, 0xb6, 0xe8, 0x81, 0xa9, 0xd5, 0xc3, 0x08, - 0x13, 0xdf, 0x72, 0x57, 0x82, 0x6a, 0xec, 0x61, 0x3f, 0x12, 0x36, 0x66, 0xc6, 0x05, 0xc6, 0x23, - 0x8e, 0x0b, 0xae, 0x80, 0xbe, 0x98, 0xb8, 0x32, 0x6a, 0x47, 0xd4, 0x10, 0x0c, 0xad, 0x23, 0x46, - 0x2f, 0x5e, 0x05, 0xfd, 0xcc, 0x4e, 0x78, 0x09, 0xf4, 0x11, 0xeb, 0x80, 0x6b, 0x1d, 0x2d, 0x0f, - 0x31, 0x11, 0x64, 0x1d, 0x20, 0x46, 0x2b, 0xfe, 0x7d, 0x0e, 0x8c, 0x67, 0xf6, 0x02, 0x67, 0x40, - 0x4e, 0x4d, 0xd6, 0x80, 0x54, 0x9a, 0x5b, 0x5b, 0x41, 0x39, 0xc7, 0x86, 0x2f, 0xaa, 0xec, 0x2a, - 0x40, 0x0b, 0xaa, 0x58, 0x70, 0x2a, 0x6b, 0xcb, 0x52, 0x75, 0xcc, 0x90, 0x24, 0x3d, 0x32, 0x1b, - 0xf0, 0xae, 0xbc, 0x15, 0xc2, 0x06, 0xbc, 0x8b, 0x18, 0xed, 0xa4, 0xb3, 0x92, 0x64, 0x58, 0x33, - 0xf0, 0x08, 0xc3, 0x9a, 0xc1, 0x07, 0x0e, 0x6b, 0x1e, 0x07, 0x03, 0x91, 0x13, 0xb9, 0x98, 0x57, - 0x2a, 0xad, 0x19, 0xbe, 0xc3, 0x88, 0x48, 0xf0, 0x20, 0x06, 0x43, 0x36, 0xde, 0xb5, 0x62, 0x37, - 0xe2, 0x45, 0x69, 0x64, 0xf1, 0xeb, 0xa7, 0x8b, 0x1e, 0x31, 0xcc, 0x58, 0x11, 0x2a, 0x51, 0xa2, - 0x1b, 0x3e, 0x01, 0x86, 0x3c, 0xeb, 0xd0, 0xf1, 0x62, 0x8f, 0x77, 0x8c, 0x86, 0x10, 0xdb, 0x10, - 0x24, 0x94, 0xf0, 0x58, 0x12, 0xc4, 0x87, 0x55, 0x37, 0xa6, 0x4e, 0x1d, 0x4b, 0xa6, 0x6c, 0xe9, - 0x54, 0x12, 0x5c, 0xcd, 0xf0, 0x51, 0xdb, 0x0a, 0x0e, 0xe6, 0xf8, 0x7c, 0xf1, 0x88, 0x06, 0x26, - 0x48, 0x28, 0xe1, 0xb5, 0x82, 0x49, 0xf9, 0xd1, 0x6e, 0x60, 0x72, 0x71, 0xdb, 0x0a, 0xf8, 0x34, - 0x18, 0xf6, 0xac, 0xc3, 0x75, 0xec, 0xd7, 0xa2, 0x3d, 0x73, 0x6c, 0xce, 0x98, 0xef, 0x2b, 0x8f, - 0x35, 0x1b, 0x85, 0xe1, 0x8d, 0x84, 0x88, 0x52, 
0x3e, 0x17, 0x76, 0x7c, 0x29, 0x7c, 0x5e, 0x13, - 0x4e, 0x88, 0x28, 0xe5, 0xb3, 0xce, 0x24, 0xb4, 0x22, 0x76, 0xaf, 0xcc, 0xf1, 0xd6, 0x87, 0xf3, - 0x96, 0x20, 0xa3, 0x84, 0x0f, 0xe7, 0x41, 0xde, 0xb3, 0x0e, 0xf9, 0x9b, 0xd2, 0x9c, 0xe0, 0x6a, - 0xf9, 0x40, 0x71, 0x43, 0xd2, 0x90, 0xe2, 0x72, 0x49, 0xc7, 0x17, 0x92, 0x93, 0x9a, 0xa4, 0xa4, - 0x21, 0xc5, 0x65, 0xf1, 0x1b, 0xfb, 0xce, 0xfd, 0x18, 0x0b, 0x61, 0xc8, 0x3d, 0xa3, 0xe2, 0xf7, - 0x6e, 0xca, 0x42, 0xba, 0x1c, 0x7b, 0xd3, 0x79, 0xb1, 0x1b, 0x39, 0xa1, 0x8b, 0x37, 0x77, 0xcd, - 0x0b, 0xdc, 0xff, 0xbc, 0x95, 0xdf, 0x50, 0x54, 0xa4, 0x49, 0xc0, 0xb7, 0x41, 0x3f, 0xf6, 0x63, - 0xcf, 0xbc, 0xc8, 0xcb, 0xf7, 0x69, 0xa3, 0x4f, 0xdd, 0x97, 0x55, 0x3f, 0xf6, 0x10, 0xd7, 0x0c, - 0x5f, 0x04, 0x63, 0x9e, 0x75, 0xc8, 0x92, 0x00, 0x26, 0x11, 0x7b, 0x68, 0x4e, 0xf1, 0x7d, 0x4f, - 0xb2, 0x26, 0x76, 0x43, 0x67, 0xa0, 0x56, 0x39, 0xbe, 0xd0, 0xf1, 0xb5, 0x85, 0xd3, 0xda, 0x42, - 0x9d, 0x81, 0x5a, 0xe5, 0x98, 0x93, 0x09, 0xbe, 0x1f, 0x3b, 0x04, 0xdb, 0xe6, 0xff, 0xf1, 0xbe, - 0x57, 0xce, 0x77, 0x05, 0x0d, 0x29, 0x2e, 0xbc, 0x9f, 0x8c, 0x1c, 0x4c, 0x7e, 0xf9, 0xb6, 0x7a, - 0x96, 0xba, 0x37, 0xc9, 0x12, 0x21, 0xd6, 0x91, 0xa8, 0x2a, 0xfa, 0xb0, 0x01, 0xfa, 0x60, 0xc0, - 0x72, 0xdd, 0xcd, 0x5d, 0xf3, 0x12, 0xf7, 0x78, 0x0f, 0xab, 0x85, 0xca, 0x30, 0x4b, 0x4c, 0x3f, - 0x12, 0x30, 0x0c, 0x2f, 0xf0, 0x59, 0x2c, 0xcc, 0x9c, 0x19, 0xde, 0x26, 0xd3, 0x8f, 0x04, 0x0c, - 0xdf, 0x9f, 0x7f, 0xb4, 0xb9, 0x6b, 0x3e, 0x76, 0x76, 0xfb, 0x63, 0xfa, 0x91, 0x80, 0x81, 0x36, - 0xe8, 0xf3, 0x83, 0xc8, 0xbc, 0xdc, 0xeb, 0xda, 0xcb, 0xab, 0xc9, 0xed, 0x20, 0x42, 0x4c, 0x3d, - 0xfc, 0xb1, 0x01, 0x40, 0x98, 0x46, 0xe2, 0x95, 0xd3, 0x8e, 0x00, 0x32, 0x68, 0xa5, 0x34, 0x7a, - 0x57, 0xfd, 0x88, 0x1c, 0xa5, 0xef, 0x1a, 0x2d, 0xca, 0x35, 0x03, 0xe0, 0x2f, 0x0c, 0x70, 0x51, - 0x6f, 0x77, 0x95, 0x65, 0xb3, 0xdc, 0x0f, 0x9b, 0x3d, 0x0c, 0xe4, 0x72, 0x10, 0xb8, 0x65, 0xb3, - 0xd9, 0x28, 0x5c, 0x5c, 0xea, 0x00, 0x88, 0x3a, 0x9a, 0x01, 0x7f, 0x63, 0x80, 0x49, 0x99, 0x1d, - 0x35, 0xe3, 0x0a, 0xdc, 0x6d, 0x6f, 0xf7, 0xd0, 0x6d, 0x59, 0x08, 0xe1, 0x3d, 0xf5, 0x95, 0xb1, - 0x8d, 0x8f, 0xda, 0xad, 0x82, 0xbf, 0x37, 0xc0, 0xa8, 0x8d, 0x43, 0xec, 0xdb, 0xd8, 0xaf, 0x32, - 0x33, 0xe7, 0x4e, 0x3b, 0x57, 0xc8, 0x9a, 0xb9, 0xa2, 0x69, 0x17, 0x16, 0x96, 0xa4, 0x85, 0xa3, - 0x3a, 0xeb, 0xb8, 0x51, 0x98, 0x4e, 0x97, 0xea, 0x1c, 0xd4, 0x62, 0x20, 0xfc, 0x89, 0x01, 0xc6, - 0x53, 0xb7, 0x8b, 0x02, 0x71, 0xf5, 0x6c, 0x0e, 0x9e, 0xb7, 0xa0, 0x4b, 0xad, 0x58, 0x28, 0x0b, - 0x0e, 0x7f, 0x6b, 0xb0, 0x6e, 0x2b, 0x79, 0xab, 0x51, 0xb3, 0xc8, 0x3d, 0xf8, 0x46, 0x2f, 0x3d, - 0xa8, 0x94, 0x0b, 0x07, 0x5e, 0x4f, 0x3b, 0x39, 0xc5, 0x39, 0x6e, 0x14, 0xa6, 0x74, 0xff, 0x29, - 0x06, 0xd2, 0x8d, 0x83, 0xef, 0x19, 0x60, 0x14, 0xa7, 0x0d, 0x33, 0x35, 0x1f, 0x3f, 0xad, 0xeb, - 0x3a, 0xb6, 0xdf, 0xe2, 0x39, 0xad, 0xb1, 0x28, 0x6a, 0x81, 0x65, 0xbd, 0x1f, 0x3e, 0xb4, 0xbc, - 0xd0, 0xc5, 0xe6, 0xff, 0xf7, 0xae, 0xf7, 0x5b, 0x15, 0x2a, 0x51, 0xa2, 0x1b, 0x5e, 0x07, 0x79, - 0x3f, 0x76, 0x5d, 0x6b, 0xc7, 0xc5, 0xe6, 0x13, 0xbc, 0x8b, 0x50, 0xf3, 0xc5, 0xdb, 0x92, 0x8e, - 0x94, 0x04, 0xdc, 0x05, 0x73, 0x87, 0xb7, 0xd4, 0x8f, 0x2f, 0x3a, 0x0e, 0xf0, 0xcc, 0x6b, 0x5c, - 0xcb, 0x4c, 0xb3, 0x51, 0x98, 0xde, 0xee, 0x3c, 0xe2, 0x7b, 0xa8, 0x0e, 0xf8, 0x26, 0x78, 0x4c, - 0x93, 0x59, 0xf5, 0x76, 0xb0, 0x6d, 0x63, 0x3b, 0x79, 0x68, 0x99, 0x5f, 0xe2, 0x10, 0xea, 0x1e, - 0x6f, 0x67, 0x05, 0xd0, 0x83, 0x56, 0xc3, 0x75, 0x30, 0xad, 0xb1, 0xd7, 0xfc, 0x68, 0x93, 0x54, - 0x22, 0xe2, 0xf8, 0x35, 0x73, 0x9e, 0xeb, 0xbd, 0x98, 0xdc, 0xbe, 0x6d, 
0x8d, 0x87, 0xba, 0xac, - 0x81, 0xaf, 0xb4, 0x68, 0xe3, 0x1f, 0x2e, 0xac, 0xf0, 0x16, 0x3e, 0xa2, 0xe6, 0x93, 0xbc, 0xb9, - 0xe0, 0xe7, 0xbc, 0xad, 0xd1, 0x51, 0x17, 0x79, 0xf8, 0x0d, 0x70, 0x21, 0xc3, 0x61, 0xef, 0x0a, - 0xf3, 0x29, 0xf1, 0x40, 0x60, 0x9d, 0xe8, 0x76, 0x42, 0x44, 0x9d, 0x24, 0xe1, 0xd7, 0x00, 0xd4, - 0xc8, 0x1b, 0x56, 0xc8, 0xd7, 0x3f, 0x2d, 0xde, 0x2a, 0xec, 0x44, 0xb7, 0x25, 0x0d, 0x75, 0x90, - 0x83, 0x1f, 0x1a, 0x2d, 0x3b, 0x49, 0x5f, 0xb3, 0xd4, 0xbc, 0xce, 0x2f, 0xec, 0x2b, 0x27, 0x0f, - 0xc0, 0x54, 0x19, 0x8a, 0x5d, 0xac, 0x79, 0x58, 0x43, 0x41, 0x5d, 0xd0, 0x67, 0xd8, 0x63, 0x3a, - 0x93, 0xc3, 0xe1, 0x04, 0xe8, 0xdb, 0xc7, 0xf2, 0xb3, 0x31, 0x62, 0x7f, 0xc2, 0xb7, 0xc0, 0x40, - 0xdd, 0x72, 0xe3, 0x64, 0x14, 0xd0, 0xbb, 0x5a, 0x8f, 0x84, 0xde, 0x97, 0x72, 0x37, 0x8c, 0x99, - 0x0f, 0x0c, 0x30, 0xdd, 0xb9, 0xaa, 0x7c, 0x51, 0x16, 0xfd, 0xdc, 0x00, 0x93, 0x6d, 0x05, 0xa4, - 0x83, 0x31, 0x6e, 0xab, 0x31, 0xf7, 0x7a, 0x58, 0x09, 0xc4, 0x45, 0xe0, 0x1d, 0xad, 0x6e, 0xd9, - 0x8f, 0x0c, 0x30, 0x91, 0x4d, 0xcc, 0x5f, 0x90, 0x97, 0x8a, 0xef, 0xe7, 0xc0, 0x74, 0xe7, 0x1e, - 0x1c, 0x7a, 0x6a, 0xba, 0xd0, 0xf3, 0x01, 0x4d, 0xa7, 0x91, 0xed, 0xbb, 0x06, 0x18, 0x79, 0x47, - 0xc9, 0x25, 0x5f, 0x33, 0x7b, 0x39, 0x15, 0x4a, 0x4a, 0x5f, 0xca, 0xa0, 0x48, 0x87, 0x2c, 0xfe, - 0xce, 0x00, 0x53, 0x1d, 0xcb, 0x39, 0xbc, 0x06, 0x06, 0x2d, 0xd7, 0x0d, 0x0e, 0xc4, 0x34, 0x4f, - 0x1b, 0xcb, 0x2f, 0x71, 0x2a, 0x92, 0x5c, 0xcd, 0x67, 0xb9, 0xcf, 0xc1, 0x67, 0xc5, 0x3f, 0x1a, - 0xe0, 0xf2, 0x83, 0xa2, 0xee, 0xf3, 0x3e, 0xc3, 0x79, 0x90, 0x97, 0xcd, 0xf6, 0x11, 0x3f, 0x3f, - 0x99, 0x5d, 0x65, 0x46, 0xe0, 0xbf, 0x96, 0x11, 0x7f, 0x15, 0x7f, 0x65, 0x80, 0x89, 0x0a, 0x26, - 0x75, 0xa7, 0x8a, 0x11, 0xde, 0xc5, 0x04, 0xfb, 0x55, 0x0c, 0x17, 0xc0, 0x30, 0xff, 0xda, 0x18, - 0x5a, 0xd5, 0xe4, 0x1b, 0xc9, 0xa4, 0x74, 0xf4, 0xf0, 0xed, 0x84, 0x81, 0x52, 0x19, 0xf5, 0x3d, - 0x25, 0xd7, 0xf5, 0x7b, 0xca, 0x65, 0xd0, 0x1f, 0xa6, 0x03, 0xe0, 0x3c, 0xe3, 0xf2, 0x99, 0x2f, - 0xa7, 0x72, 0x6e, 0x40, 0x22, 0x3e, 0xe5, 0x1a, 0x90, 0xdc, 0x80, 0x44, 0x88, 0x53, 0x8b, 0xdf, - 0x02, 0xe7, 0x5b, 0xd3, 0x33, 0xc3, 0x23, 0xb1, 0xdb, 0xf6, 0xfd, 0x86, 0xf1, 0x10, 0xe7, 0xe8, - 0x3f, 0x1b, 0xc8, 0x3d, 0xe4, 0x67, 0x03, 0x7f, 0x32, 0xc0, 0x85, 0xe4, 0x57, 0x35, 0xae, 0x83, - 0xfd, 0x68, 0x39, 0xf0, 0x77, 0x9d, 0x1a, 0xbc, 0x24, 0xe6, 0x88, 0xda, 0x70, 0x2e, 0x99, 0x21, - 0xc2, 0xfb, 0x60, 0x88, 0x0a, 0xa7, 0xc9, 0xf3, 0x7c, 0xf5, 0xe4, 0xe7, 0x99, 0xf5, 0xbe, 0x68, - 0x83, 0x12, 0x6a, 0x82, 0xc3, 0x8e, 0xb4, 0x6a, 0x95, 0x63, 0xdf, 0x96, 0xb3, 0xe4, 0x51, 0x71, - 0xa4, 0xcb, 0x4b, 0x82, 0x86, 0x14, 0xb7, 0xf8, 0x0f, 0x03, 0x4c, 0xb6, 0xfd, 0x4a, 0x08, 0x7e, - 0xdf, 0x00, 0xa3, 0x55, 0x6d, 0x7b, 0xf2, 0x62, 0x6c, 0x9c, 0xfe, 0x97, 0x48, 0x9a, 0x52, 0xd1, - 0x4b, 0xe8, 0x14, 0xd4, 0x02, 0x0a, 0xb7, 0x81, 0x59, 0xcd, 0xfc, 0x20, 0x2f, 0xf3, 0x89, 0xef, - 0x72, 0xb3, 0x51, 0x30, 0x97, 0xbb, 0xc8, 0xa0, 0xae, 0xab, 0xcb, 0xdf, 0xfe, 0xf8, 0xb3, 0xd9, - 0x73, 0x9f, 0x7c, 0x36, 0x7b, 0xee, 0xd3, 0xcf, 0x66, 0xcf, 0xbd, 0xdb, 0x9c, 0x35, 0x3e, 0x6e, - 0xce, 0x1a, 0x9f, 0x34, 0x67, 0x8d, 0x4f, 0x9b, 0xb3, 0xc6, 0x5f, 0x9b, 0xb3, 0xc6, 0x4f, 0xff, - 0x36, 0x7b, 0xee, 0x8d, 0x1b, 0x27, 0xfd, 0x19, 0xee, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc0, - 0x20, 0xb3, 0x2b, 0xda, 0x2b, 0x00, 0x00, + 0x2a, 0x9d, 0x74, 0x47, 0x95, 0xda, 0xe2, 0xaf, 0x3c, 0xc2, 0x96, 0x4b, 0xfe, 0x41, 0x09, 0x10, + 0xac, 0x83, 0x3c, 0x91, 0x6b, 0xc4, 0x03, 0x69, 0x64, 0x71, 0xbd, 0x37, 0xa0, 0x42, 0x67, 0x79, + 0xb4, 0xd9, 0x28, 0xe4, 0x93, 0x7f, 0x48, 0x61, 0x15, 
0x7f, 0x91, 0x03, 0xb3, 0xcb, 0x31, 0x8d, + 0x02, 0x0f, 0x61, 0x1a, 0xc4, 0xa4, 0x8a, 0x97, 0x03, 0x37, 0xf6, 0xfc, 0x15, 0xbc, 0xeb, 0xf8, + 0x4e, 0xc4, 0x62, 0x74, 0x0e, 0xf4, 0xfb, 0x96, 0x87, 0x65, 0xcc, 0x8c, 0x4a, 0x4f, 0xf6, 0xdf, + 0xb6, 0x3c, 0x8c, 0x38, 0x87, 0x49, 0xb0, 0x10, 0x91, 0x3b, 0x40, 0x49, 0xdc, 0x39, 0x0a, 0x31, + 0xe2, 0x1c, 0x78, 0x0d, 0x0c, 0xee, 0x06, 0xc4, 0xb3, 0xc4, 0xea, 0x0d, 0xa7, 0xeb, 0x71, 0x93, + 0x53, 0x91, 0xe4, 0xc2, 0x17, 0xc0, 0x88, 0x8d, 0x69, 0x95, 0x38, 0x21, 0x83, 0x36, 0xfb, 0xb9, + 0xf0, 0x05, 0x29, 0x3c, 0xb2, 0x92, 0xb2, 0x90, 0x2e, 0x07, 0xaf, 0x83, 0x7c, 0x48, 0x9c, 0x80, + 0x38, 0xd1, 0x91, 0x39, 0x30, 0x67, 0xcc, 0x0f, 0x94, 0x27, 0xe4, 0x98, 0xfc, 0x96, 0xa4, 0x23, + 0x25, 0xc1, 0xa4, 0xdf, 0xa1, 0x81, 0xbf, 0x65, 0x45, 0x7b, 0xe6, 0x20, 0x47, 0x50, 0xd2, 0xaf, + 0x56, 0x36, 0x6f, 0x33, 0x3a, 0x52, 0x12, 0xc5, 0x3f, 0x1b, 0xc0, 0xcc, 0x7a, 0x28, 0x71, 0x2f, + 0xbc, 0x09, 0xf2, 0x34, 0x62, 0x39, 0xa7, 0x76, 0x24, 0xfd, 0xf3, 0x54, 0xa2, 0xaa, 0x22, 0xe9, + 0xc7, 0x8d, 0xc2, 0x74, 0x3a, 0x22, 0xa1, 0x72, 0xdf, 0xa8, 0xb1, 0x2c, 0xe4, 0x0e, 0xf0, 0xce, + 0x5e, 0x10, 0xec, 0xcb, 0xd5, 0x3f, 0x45, 0xc8, 0xbd, 0x26, 0x14, 0xa5, 0x98, 0x22, 0xe4, 0x24, + 0x19, 0x25, 0x40, 0xc5, 0xff, 0xe4, 0xb2, 0x13, 0xd3, 0x16, 0xfd, 0x6d, 0x90, 0x67, 0x5b, 0xc8, + 0xb6, 0x22, 0x4b, 0x6e, 0x82, 0x67, 0x1f, 0x6d, 0xc3, 0x89, 0xfd, 0xba, 0x81, 0x23, 0xab, 0x0c, + 0xa5, 0x2b, 0x40, 0x4a, 0x43, 0x4a, 0x2b, 0x3c, 0x04, 0xfd, 0x34, 0xc4, 0x55, 0x39, 0xdf, 0x7b, + 0xa7, 0x88, 0xf6, 0x2e, 0x73, 0xa8, 0x84, 0xb8, 0x9a, 0x06, 0x23, 0xfb, 0x87, 0x38, 0x22, 0x7c, + 0xd7, 0x00, 0x83, 0x94, 0xe7, 0x05, 0x99, 0x4b, 0xb6, 0xcf, 0x00, 0x3c, 0x93, 0x77, 0xc4, 0x7f, + 0x24, 0x71, 0x8b, 0xff, 0xcc, 0x81, 0xab, 0xdd, 0x86, 0x2e, 0x07, 0xbe, 0x2d, 0x16, 0x61, 0x4d, + 0xee, 0x2b, 0x11, 0x59, 0x2f, 0xe8, 0xfb, 0xea, 0xb8, 0x51, 0x78, 0xe2, 0xa1, 0x0a, 0xb4, 0x0d, + 0xf8, 0x55, 0x35, 0x65, 0xb1, 0x49, 0xaf, 0xb6, 0x1a, 0x76, 0xdc, 0x28, 0x8c, 0xab, 0x61, 0xad, + 0xb6, 0xc2, 0x3a, 0x80, 0xae, 0x45, 0xa3, 0x3b, 0xc4, 0xf2, 0xa9, 0x50, 0xeb, 0x78, 0x58, 0x7a, + 0xee, 0xa9, 0x47, 0x0b, 0x0a, 0x36, 0xa2, 0x3c, 0x23, 0x21, 0xe1, 0x7a, 0x9b, 0x36, 0xd4, 0x01, + 0x81, 0xe5, 0x0c, 0x82, 0x2d, 0xaa, 0xd2, 0x80, 0x96, 0xc3, 0x19, 0x15, 0x49, 0x2e, 0x7c, 0x12, + 0x0c, 0x79, 0x98, 0x52, 0xab, 0x86, 0xf9, 0xde, 0x1f, 0x4e, 0x0f, 0xc5, 0x0d, 0x41, 0x46, 0x09, + 0xbf, 0xf8, 0x2f, 0x03, 0x5c, 0xee, 0xe6, 0xb5, 0x75, 0x87, 0x46, 0xf0, 0x9b, 0x6d, 0x61, 0x5f, + 0x7a, 0xb4, 0x19, 0xb2, 0xd1, 0x3c, 0xe8, 0x55, 0x2a, 0x49, 0x28, 0x5a, 0xc8, 0x1f, 0x80, 0x01, + 0x27, 0xc2, 0x5e, 0x72, 0x5a, 0xa2, 0xde, 0x87, 0x5d, 0x79, 0x4c, 0xc2, 0x0f, 0xac, 0x31, 0x20, + 0x24, 0xf0, 0x8a, 0x1f, 0xe5, 0xc0, 0x95, 0x6e, 0x43, 0x58, 0x1e, 0xa7, 0xcc, 0xd9, 0xa1, 0x1b, + 0x13, 0xcb, 0x95, 0xc1, 0xa6, 0x9c, 0xbd, 0xc5, 0xa9, 0x48, 0x72, 0x59, 0xee, 0xa4, 0x8e, 0x5f, + 0x8b, 0x5d, 0x8b, 0xc8, 0x48, 0x52, 0x13, 0xae, 0x48, 0x3a, 0x52, 0x12, 0xb0, 0x04, 0x00, 0xdd, + 0x0b, 0x48, 0xc4, 0x31, 0x78, 0x85, 0x33, 0x5c, 0x3e, 0xcf, 0x32, 0x42, 0x45, 0x51, 0x91, 0x26, + 0xc1, 0x0e, 0x92, 0x7d, 0xc7, 0xb7, 0xe5, 0x82, 0xab, 0xbd, 0x7b, 0xcb, 0xf1, 0x6d, 0xc4, 0x39, + 0x0c, 0xdf, 0x75, 0x68, 0xc4, 0x28, 0x72, 0xb5, 0x5b, 0x1c, 0xce, 0x25, 0x95, 0x04, 0xc3, 0xaf, + 0xb2, 0x04, 0x1b, 0x10, 0x07, 0x53, 0x73, 0x30, 0xc5, 0x5f, 0x56, 0x54, 0xa4, 0x49, 0x14, 0xff, + 0xd2, 0xdf, 0x3d, 0x3e, 0x58, 0x02, 0x81, 0x8f, 0x83, 0x81, 0x1a, 0x09, 0xe2, 0x50, 0x7a, 0x49, + 0x79, 0xfb, 0x65, 0x46, 0x44, 0x82, 0x07, 0xbf, 0x0d, 0x06, 0x7c, 0x39, 0x61, 
0x16, 0x41, 0xaf, + 0xf5, 0x7e, 0x99, 0xb9, 0xb7, 0x52, 0x74, 0xe1, 0x48, 0x01, 0x0a, 0x9f, 0x07, 0x03, 0xb4, 0x1a, + 0x84, 0x58, 0x3a, 0x71, 0x36, 0x11, 0xaa, 0x30, 0xe2, 0x71, 0xa3, 0x30, 0x96, 0xa8, 0xe3, 0x04, + 0x24, 0x84, 0xe1, 0xf7, 0x0d, 0x90, 0x97, 0xc7, 0x05, 0x35, 0x87, 0x78, 0x78, 0xbe, 0xde, 0x7b, + 0xbb, 0x65, 0xd9, 0x9b, 0xae, 0x99, 0x24, 0x50, 0xa4, 0xc0, 0xe1, 0x77, 0x0d, 0x00, 0xaa, 0xea, + 0xec, 0x32, 0x87, 0xb9, 0x0f, 0x7b, 0xb6, 0x55, 0xb4, 0x53, 0x51, 0x04, 0x42, 0x5a, 0x2a, 0x69, + 0xa8, 0xb0, 0x02, 0xa6, 0x42, 0x82, 0xb9, 0xee, 0xbb, 0xfe, 0xbe, 0x1f, 0x1c, 0xf8, 0x37, 0x1d, + 0xec, 0xda, 0xd4, 0x04, 0x73, 0xc6, 0x7c, 0xbe, 0x7c, 0x45, 0xda, 0x3f, 0xb5, 0xd5, 0x49, 0x08, + 0x75, 0x1e, 0x5b, 0x7c, 0xaf, 0x2f, 0x5b, 0x6b, 0x65, 0xcf, 0x0b, 0xf8, 0x81, 0x98, 0xbc, 0xc8, + 0xc3, 0xd4, 0x34, 0xf8, 0x42, 0xbc, 0xd9, 0xfb, 0x85, 0x50, 0xb9, 0x3e, 0x3d, 0xa4, 0x15, 0x89, + 0x22, 0xcd, 0x04, 0xf8, 0x53, 0x03, 0x8c, 0x59, 0xd5, 0x2a, 0x0e, 0x23, 0x6c, 0x8b, 0x6d, 0x9c, + 0x3b, 0xdb, 0xa8, 0x9e, 0x92, 0x06, 0x8d, 0x2d, 0xe9, 0xa8, 0xa8, 0xd5, 0x08, 0xf8, 0x12, 0x38, + 0x4f, 0xa3, 0x80, 0x60, 0x3b, 0x89, 0x20, 0x99, 0x5d, 0x60, 0xb3, 0x51, 0x38, 0x5f, 0x69, 0xe1, + 0xa0, 0x8c, 0x64, 0xf1, 0x93, 0x01, 0x50, 0x78, 0x48, 0x84, 0x3e, 0x42, 0xd1, 0x7b, 0x0d, 0x0c, + 0xf2, 0x99, 0xda, 0xdc, 0x21, 0x79, 0xed, 0xa8, 0xe7, 0x54, 0x24, 0xb9, 0xec, 0x78, 0x62, 0xf8, + 0xec, 0x78, 0xea, 0xe3, 0x82, 0xea, 0x78, 0xaa, 0x08, 0x32, 0x4a, 0xf8, 0x70, 0x11, 0x00, 0x1b, + 0x87, 0x04, 0xb3, 0x8c, 0x64, 0x9b, 0x43, 0x5c, 0x5a, 0xad, 0xcf, 0x8a, 0xe2, 0x20, 0x4d, 0x0a, + 0xde, 0x04, 0x30, 0xf9, 0xe7, 0x04, 0xfe, 0x6b, 0x16, 0xf1, 0x1d, 0xbf, 0x66, 0xe6, 0xb9, 0xd9, + 0xd3, 0xec, 0xb4, 0x5d, 0x69, 0xe3, 0xa2, 0x0e, 0x23, 0x60, 0x1d, 0x0c, 0x8a, 0x6b, 0x34, 0xcf, + 0x1b, 0x3d, 0xdc, 0x71, 0xf7, 0x2c, 0xd7, 0xb1, 0x39, 0x54, 0x19, 0x70, 0xf7, 0x70, 0x14, 0x24, + 0xd1, 0xe0, 0xfb, 0x06, 0x18, 0xa5, 0xf1, 0x0e, 0x91, 0xd2, 0x94, 0x67, 0xf5, 0x91, 0xc5, 0x3b, + 0xbd, 0x82, 0xaf, 0x68, 0xba, 0xcb, 0x13, 0xcd, 0x46, 0x61, 0x54, 0xa7, 0xa0, 0x16, 0x6c, 0xf8, + 0x07, 0x03, 0x98, 0x96, 0x2d, 0x42, 0xdf, 0x72, 0xb7, 0x88, 0xe3, 0x47, 0x98, 0x88, 0x0b, 0x91, + 0x38, 0x3e, 0x7a, 0x58, 0x2b, 0x66, 0xef, 0x59, 0xe5, 0x39, 0xb9, 0xd2, 0xe6, 0x52, 0x17, 0x0b, + 0x50, 0x57, 0xdb, 0x8a, 0xff, 0x36, 0xb2, 0xa9, 0x45, 0x9b, 0x65, 0xa5, 0x6a, 0xb9, 0x18, 0xae, + 0x80, 0x09, 0x56, 0xfd, 0x22, 0x1c, 0xba, 0x4e, 0xd5, 0xa2, 0xfc, 0xf6, 0x23, 0xa2, 0x5b, 0x5d, + 0xc3, 0x2b, 0x19, 0x3e, 0x6a, 0x1b, 0x01, 0x5f, 0x05, 0x50, 0x94, 0x85, 0x2d, 0x7a, 0x44, 0x25, + 0xa0, 0x0a, 0xbc, 0x4a, 0x9b, 0x04, 0xea, 0x30, 0x0a, 0x2e, 0x83, 0x49, 0xd7, 0xda, 0xc1, 0x6e, + 0x05, 0xbb, 0xb8, 0x1a, 0x05, 0x84, 0xab, 0x12, 0xf7, 0xc3, 0xa9, 0x66, 0xa3, 0x30, 0xb9, 0x9e, + 0x65, 0xa2, 0x76, 0xf9, 0xe2, 0xd5, 0xec, 0x5e, 0xd6, 0x27, 0x2e, 0x8a, 0xed, 0x0f, 0x73, 0x60, + 0xa6, 0x7b, 0x50, 0xc0, 0xef, 0xa8, 0xd2, 0x58, 0x54, 0x7c, 0xaf, 0x9f, 0x41, 0xe8, 0xc9, 0xeb, + 0x00, 0x68, 0xbf, 0x0a, 0xc0, 0x23, 0x76, 0x5e, 0x5b, 0x6e, 0x72, 0xed, 0xdf, 0x3e, 0x0b, 0x74, + 0xa6, 0xbf, 0x3c, 0x2c, 0xaa, 0x00, 0xcb, 0xe5, 0x87, 0xbe, 0xe5, 0xe2, 0xe2, 0x47, 0x6d, 0x57, + 0xdb, 0x74, 0xb3, 0xc2, 0x1f, 0x18, 0x60, 0x3c, 0x08, 0xb1, 0xbf, 0xb4, 0xb5, 0x76, 0xef, 0xcb, + 0x62, 0xd3, 0x4a, 0x07, 0xad, 0x9d, 0xdc, 0x44, 0x76, 0xbf, 0x16, 0xba, 0xb6, 0x48, 0x10, 0xd2, + 0xf2, 0x85, 0x66, 0xa3, 0x30, 0xbe, 0xd9, 0x8a, 0x82, 0xb2, 0xb0, 0x45, 0x0f, 0x4c, 0xad, 0x1e, + 0x46, 0x98, 0xf8, 0x96, 0xbb, 0x12, 0x54, 0x63, 0x0f, 0xfb, 0x91, 0xb0, 0x31, 0xd3, 0x2e, 0x30, + 0x1e, 
0xb1, 0x5d, 0x70, 0x05, 0xf4, 0xc5, 0xc4, 0x95, 0x51, 0x3b, 0xa2, 0x9a, 0x60, 0x68, 0x1d, + 0x31, 0x7a, 0xf1, 0x2a, 0xe8, 0x67, 0x76, 0xc2, 0x4b, 0xa0, 0x8f, 0x58, 0x07, 0x5c, 0xeb, 0x68, + 0x79, 0x88, 0x89, 0x20, 0xeb, 0x00, 0x31, 0x5a, 0xf1, 0xef, 0x73, 0x60, 0x3c, 0x33, 0x17, 0x38, + 0x03, 0x72, 0xaa, 0xb3, 0x06, 0xa4, 0xd2, 0xdc, 0xda, 0x0a, 0xca, 0x39, 0x36, 0x7c, 0x51, 0x65, + 0x57, 0x01, 0x5a, 0x50, 0x87, 0x05, 0xa7, 0xb2, 0xb2, 0x2c, 0x55, 0xc7, 0x0c, 0x49, 0xd2, 0x23, + 0xb3, 0x01, 0xef, 0xca, 0x5d, 0x21, 0x6c, 0xc0, 0xbb, 0x88, 0xd1, 0x4e, 0xda, 0x2b, 0x49, 0x9a, + 0x35, 0x03, 0x8f, 0xd0, 0xac, 0x19, 0x7c, 0x60, 0xb3, 0xe6, 0x71, 0x30, 0x10, 0x39, 0x91, 0x8b, + 0xf9, 0x49, 0xa5, 0x15, 0xc3, 0x77, 0x18, 0x11, 0x09, 0x1e, 0xc4, 0x60, 0xc8, 0xc6, 0xbb, 0x56, + 0xec, 0x46, 0xfc, 0x50, 0x1a, 0x59, 0xfc, 0xfa, 0xe9, 0xa2, 0x47, 0x34, 0x33, 0x56, 0x84, 0x4a, + 0x94, 0xe8, 0x86, 0x4f, 0x80, 0x21, 0xcf, 0x3a, 0x74, 0xbc, 0xd8, 0xe3, 0x15, 0xa3, 0x21, 0xc4, + 0x36, 0x04, 0x09, 0x25, 0x3c, 0x96, 0x04, 0xf1, 0x61, 0xd5, 0x8d, 0xa9, 0x53, 0xc7, 0x92, 0x29, + 0x4b, 0x3a, 0x95, 0x04, 0x57, 0x33, 0x7c, 0xd4, 0x36, 0x82, 0x83, 0x39, 0x3e, 0x1f, 0x3c, 0xa2, + 0x81, 0x09, 0x12, 0x4a, 0x78, 0xad, 0x60, 0x52, 0x7e, 0xb4, 0x1b, 0x98, 0x1c, 0xdc, 0x36, 0x02, + 0x3e, 0x0d, 0x86, 0x3d, 0xeb, 0x70, 0x1d, 0xfb, 0xb5, 0x68, 0xcf, 0x1c, 0x9b, 0x33, 0xe6, 0xfb, + 0xca, 0x63, 0xcd, 0x46, 0x61, 0x78, 0x23, 0x21, 0xa2, 0x94, 0xcf, 0x85, 0x1d, 0x5f, 0x0a, 0x9f, + 0xd7, 0x84, 0x13, 0x22, 0x4a, 0xf9, 0xac, 0x32, 0x09, 0xad, 0x88, 0xed, 0x2b, 0x73, 0xbc, 0xf5, + 0xe2, 0xbc, 0x25, 0xc8, 0x28, 0xe1, 0xc3, 0x79, 0x90, 0xf7, 0xac, 0x43, 0x7e, 0xa7, 0x34, 0x27, + 0xb8, 0x5a, 0xde, 0x50, 0xdc, 0x90, 0x34, 0xa4, 0xb8, 0x5c, 0xd2, 0xf1, 0x85, 0xe4, 0xa4, 0x26, + 0x29, 0x69, 0x48, 0x71, 0x59, 0xfc, 0xc6, 0xbe, 0x73, 0x3f, 0xc6, 0x42, 0x18, 0x72, 0xcf, 0xa8, + 0xf8, 0xbd, 0x9b, 0xb2, 0x90, 0x2e, 0xc7, 0xee, 0x74, 0x5e, 0xec, 0x46, 0x4e, 0xe8, 0xe2, 0xcd, + 0x5d, 0xf3, 0x02, 0xf7, 0x3f, 0x2f, 0xe5, 0x37, 0x14, 0x15, 0x69, 0x12, 0xf0, 0x6d, 0xd0, 0x8f, + 0xfd, 0xd8, 0x33, 0x2f, 0xf2, 0xe3, 0xfb, 0xb4, 0xd1, 0xa7, 0xf6, 0xcb, 0xaa, 0x1f, 0x7b, 0x88, + 0x6b, 0x86, 0x2f, 0x82, 0x31, 0xcf, 0x3a, 0x64, 0x49, 0x00, 0x93, 0x88, 0x5d, 0x34, 0xa7, 0xf8, + 0xbc, 0x27, 0x59, 0x11, 0xbb, 0xa1, 0x33, 0x50, 0xab, 0x1c, 0x1f, 0xe8, 0xf8, 0xda, 0xc0, 0x69, + 0x6d, 0xa0, 0xce, 0x40, 0xad, 0x72, 0xcc, 0xc9, 0x04, 0xdf, 0x8f, 0x1d, 0x82, 0x6d, 0xf3, 0xff, + 0x78, 0xdd, 0x2b, 0xfb, 0xbb, 0x82, 0x86, 0x14, 0x17, 0xde, 0x4f, 0x5a, 0x0e, 0x26, 0xdf, 0x7c, + 0x5b, 0x3d, 0x4b, 0xdd, 0x9b, 0x64, 0x89, 0x10, 0xeb, 0x48, 0x9c, 0x2a, 0x7a, 0xb3, 0x01, 0xfa, + 0x60, 0xc0, 0x72, 0xdd, 0xcd, 0x5d, 0xf3, 0x12, 0xf7, 0x78, 0x0f, 0x4f, 0x0b, 0x95, 0x61, 0x96, + 0x98, 0x7e, 0x24, 0x60, 0x18, 0x5e, 0xe0, 0xb3, 0x58, 0x98, 0x39, 0x33, 0xbc, 0x4d, 0xa6, 0x1f, + 0x09, 0x18, 0x3e, 0x3f, 0xff, 0x68, 0x73, 0xd7, 0x7c, 0xec, 0xec, 0xe6, 0xc7, 0xf4, 0x23, 0x01, + 0x03, 0x6d, 0xd0, 0xe7, 0x07, 0x91, 0x79, 0xb9, 0xd7, 0x67, 0x2f, 0x3f, 0x4d, 0x6e, 0x07, 0x11, + 0x62, 0xea, 0xe1, 0x8f, 0x0c, 0x00, 0xc2, 0x34, 0x12, 0xaf, 0x9c, 0xb6, 0x05, 0x90, 0x41, 0x2b, + 0xa5, 0xd1, 0xbb, 0xea, 0x47, 0xe4, 0x28, 0xbd, 0xd7, 0x68, 0x51, 0xae, 0x19, 0x00, 0x7f, 0x6e, + 0x80, 0x8b, 0x7a, 0xb9, 0xab, 0x2c, 0x9b, 0xe5, 0x7e, 0xd8, 0xec, 0x61, 0x20, 0x97, 0x83, 0xc0, + 0x2d, 0x9b, 0xcd, 0x46, 0xe1, 0xe2, 0x52, 0x07, 0x40, 0xd4, 0xd1, 0x0c, 0xf8, 0x1b, 0x03, 0x4c, + 0xca, 0xec, 0xa8, 0x19, 0x57, 0xe0, 0x6e, 0x7b, 0xbb, 0x87, 0x6e, 0xcb, 0x42, 0x08, 0xef, 0xa9, + 0x57, 0xc6, 0x36, 0x3e, 0x6a, 
0xb7, 0x0a, 0xfe, 0xde, 0x00, 0xa3, 0x36, 0x0e, 0xb1, 0x6f, 0x63, + 0xbf, 0xca, 0xcc, 0x9c, 0x3b, 0x6d, 0x5f, 0x21, 0x6b, 0xe6, 0x8a, 0xa6, 0x5d, 0x58, 0x58, 0x92, + 0x16, 0x8e, 0xea, 0xac, 0xe3, 0x46, 0x61, 0x3a, 0x1d, 0xaa, 0x73, 0x50, 0x8b, 0x81, 0xf0, 0xc7, + 0x06, 0x18, 0x4f, 0xdd, 0x2e, 0x0e, 0x88, 0xab, 0x67, 0xb3, 0xf0, 0xbc, 0x04, 0x5d, 0x6a, 0xc5, + 0x42, 0x59, 0x70, 0xf8, 0x5b, 0x83, 0x55, 0x5b, 0xc9, 0x5d, 0x8d, 0x9a, 0x45, 0xee, 0xc1, 0x37, + 0x7a, 0xe9, 0x41, 0xa5, 0x5c, 0x38, 0xf0, 0x7a, 0x5a, 0xc9, 0x29, 0xce, 0x71, 0xa3, 0x30, 0xa5, + 0xfb, 0x4f, 0x31, 0x90, 0x6e, 0x1c, 0x7c, 0xcf, 0x00, 0xa3, 0x38, 0x2d, 0x98, 0xa9, 0xf9, 0xf8, + 0x69, 0x5d, 0xd7, 0xb1, 0xfc, 0x16, 0xd7, 0x69, 0x8d, 0x45, 0x51, 0x0b, 0x2c, 0xab, 0xfd, 0xf0, + 0xa1, 0xe5, 0x85, 0x2e, 0x36, 0xff, 0xbf, 0x77, 0xb5, 0xdf, 0xaa, 0x50, 0x89, 0x12, 0xdd, 0xf0, + 0x3a, 0xc8, 0xfb, 0xb1, 0xeb, 0x5a, 0x3b, 0x2e, 0x36, 0x9f, 0xe0, 0x55, 0x84, 0xea, 0x2f, 0xde, + 0x96, 0x74, 0xa4, 0x24, 0xe0, 0x2e, 0x98, 0x3b, 0xbc, 0xa5, 0x3e, 0xbe, 0xe8, 0xd8, 0xc0, 0x33, + 0xaf, 0x71, 0x2d, 0x33, 0xcd, 0x46, 0x61, 0x7a, 0xbb, 0x73, 0x8b, 0xef, 0xa1, 0x3a, 0xe0, 0x9b, + 0xe0, 0x31, 0x4d, 0x66, 0xd5, 0xdb, 0xc1, 0xb6, 0x8d, 0xed, 0xe4, 0xa2, 0x65, 0x7e, 0x89, 0x43, + 0xa8, 0x7d, 0xbc, 0x9d, 0x15, 0x40, 0x0f, 0x1a, 0x0d, 0xd7, 0xc1, 0xb4, 0xc6, 0x5e, 0xf3, 0xa3, + 0x4d, 0x52, 0x89, 0x88, 0xe3, 0xd7, 0xcc, 0x79, 0xae, 0xf7, 0x62, 0xb2, 0xfb, 0xb6, 0x35, 0x1e, + 0xea, 0x32, 0x06, 0xbe, 0xd2, 0xa2, 0x8d, 0x3f, 0x5c, 0x58, 0xe1, 0x2d, 0x7c, 0x44, 0xcd, 0x27, + 0x79, 0x71, 0xc1, 0xd7, 0x79, 0x5b, 0xa3, 0xa3, 0x2e, 0xf2, 0xf0, 0x1b, 0xe0, 0x42, 0x86, 0xc3, + 0xee, 0x15, 0xe6, 0x53, 0xe2, 0x82, 0xc0, 0x2a, 0xd1, 0xed, 0x84, 0x88, 0x3a, 0x49, 0xc2, 0xaf, + 0x01, 0xa8, 0x91, 0x37, 0xac, 0x90, 0x8f, 0x7f, 0x5a, 0xdc, 0x55, 0xd8, 0x8a, 0x6e, 0x4b, 0x1a, + 0xea, 0x20, 0x07, 0x3f, 0x34, 0x5a, 0x66, 0x92, 0xde, 0x66, 0xa9, 0x79, 0x9d, 0x6f, 0xd8, 0x57, + 0x4e, 0x1e, 0x80, 0xa9, 0x32, 0x14, 0xbb, 0x58, 0xf3, 0xb0, 0x86, 0x82, 0xba, 0xa0, 0xcf, 0xb0, + 0xcb, 0x74, 0x26, 0x87, 0xc3, 0x09, 0xd0, 0xb7, 0x8f, 0xe5, 0xb3, 0x31, 0x62, 0x3f, 0xe1, 0x5b, + 0x60, 0xa0, 0x6e, 0xb9, 0x71, 0xd2, 0x0a, 0xe8, 0xdd, 0x59, 0x8f, 0x84, 0xde, 0x97, 0x72, 0x37, + 0x8c, 0x99, 0x0f, 0x0c, 0x30, 0xdd, 0xf9, 0x54, 0xf9, 0xa2, 0x2c, 0xfa, 0x99, 0x01, 0x26, 0xdb, + 0x0e, 0x90, 0x0e, 0xc6, 0xb8, 0xad, 0xc6, 0xdc, 0xeb, 0xe1, 0x49, 0x20, 0x36, 0x02, 0xaf, 0x68, + 0x75, 0xcb, 0x7e, 0x68, 0x80, 0x89, 0x6c, 0x62, 0xfe, 0x82, 0xbc, 0x54, 0x7c, 0x3f, 0x07, 0xa6, + 0x3b, 0xd7, 0xe0, 0xd0, 0x53, 0xdd, 0x85, 0x9e, 0x37, 0x68, 0x3a, 0xb5, 0x6c, 0xdf, 0x35, 0xc0, + 0xc8, 0x3b, 0x4a, 0x2e, 0x79, 0xcd, 0xec, 0x65, 0x57, 0x28, 0x39, 0xfa, 0x52, 0x06, 0x45, 0x3a, + 0x64, 0xf1, 0x77, 0x06, 0x98, 0xea, 0x78, 0x9c, 0xc3, 0x6b, 0x60, 0xd0, 0x72, 0xdd, 0xe0, 0x40, + 0x74, 0xf3, 0xb4, 0xb6, 0xfc, 0x12, 0xa7, 0x22, 0xc9, 0xd5, 0x7c, 0x96, 0xfb, 0x1c, 0x7c, 0x56, + 0xfc, 0xa3, 0x01, 0x2e, 0x3f, 0x28, 0xea, 0x3e, 0xef, 0x35, 0x9c, 0x07, 0x79, 0x59, 0x6c, 0x1f, + 0xf1, 0xf5, 0x93, 0xd9, 0x55, 0x66, 0x04, 0xfe, 0xb5, 0x8c, 0xf8, 0x55, 0xfc, 0xa5, 0x01, 0x26, + 0x2a, 0x98, 0xd4, 0x9d, 0x2a, 0x46, 0x78, 0x17, 0x13, 0xec, 0x57, 0x31, 0x5c, 0x00, 0xc3, 0xfc, + 0xb5, 0x31, 0xb4, 0xaa, 0xc9, 0x1b, 0xc9, 0xa4, 0x74, 0xf4, 0xf0, 0xed, 0x84, 0x81, 0x52, 0x19, + 0xf5, 0x9e, 0x92, 0xeb, 0xfa, 0x9e, 0x72, 0x19, 0xf4, 0x87, 0x69, 0x03, 0x38, 0xcf, 0xb8, 0xbc, + 0xe7, 0xcb, 0xa9, 0x9c, 0x1b, 0x90, 0x88, 0x77, 0xb9, 0x06, 0x24, 0x37, 0x20, 0x11, 0xe2, 0xd4, + 0xe2, 0xaf, 0x0d, 0x70, 0xbe, 0x35, 0x3f, 0x33, 0x40, 
0x12, 0xbb, 0x6d, 0x0f, 0x38, 0x8c, 0x87, + 0x38, 0x47, 0xff, 0x6e, 0x20, 0xf7, 0xe0, 0xef, 0x06, 0xe0, 0xcb, 0x60, 0x52, 0xfe, 0x5c, 0x3d, + 0x0c, 0x09, 0xa6, 0xfc, 0x65, 0xb2, 0xaf, 0xf5, 0x7b, 0xbf, 0x8d, 0xac, 0x00, 0x6a, 0x1f, 0x53, + 0xfc, 0x93, 0x01, 0x2e, 0x24, 0xdf, 0xe7, 0xb8, 0x0e, 0xf6, 0xa3, 0xe5, 0xc0, 0xdf, 0x75, 0x6a, + 0xf0, 0x92, 0xe8, 0x48, 0x6a, 0x6d, 0xbe, 0xa4, 0x1b, 0x09, 0xef, 0x83, 0x21, 0x2a, 0xdc, 0x2f, + 0x23, 0xe3, 0xd5, 0x93, 0x47, 0x46, 0x76, 0x1d, 0x45, 0x41, 0x95, 0x50, 0x13, 0x1c, 0x16, 0x1c, + 0x55, 0xab, 0x1c, 0xfb, 0xb6, 0xec, 0x4a, 0x8f, 0x8a, 0xe0, 0x58, 0x5e, 0x12, 0x34, 0xa4, 0xb8, + 0xc5, 0x7f, 0x18, 0x60, 0xb2, 0xed, 0x7b, 0x23, 0xf8, 0x3d, 0x03, 0x8c, 0x56, 0xb5, 0xe9, 0xc9, + 0x2d, 0xb6, 0x71, 0xfa, 0x6f, 0x9a, 0x34, 0xa5, 0xa2, 0x2a, 0xd1, 0x29, 0xa8, 0x05, 0x14, 0x6e, + 0x03, 0xb3, 0x9a, 0xf9, 0xb4, 0x2f, 0xf3, 0x58, 0x78, 0xb9, 0xd9, 0x28, 0x98, 0xcb, 0x5d, 0x64, + 0x50, 0xd7, 0xd1, 0xe5, 0x6f, 0x7d, 0xfc, 0xd9, 0xec, 0xb9, 0x4f, 0x3e, 0x9b, 0x3d, 0xf7, 0xe9, + 0x67, 0xb3, 0xe7, 0xde, 0x6d, 0xce, 0x1a, 0x1f, 0x37, 0x67, 0x8d, 0x4f, 0x9a, 0xb3, 0xc6, 0xa7, + 0xcd, 0x59, 0xe3, 0xaf, 0xcd, 0x59, 0xe3, 0x27, 0x7f, 0x9b, 0x3d, 0xf7, 0xc6, 0x8d, 0x93, 0x7e, + 0xd0, 0xfb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x41, 0x66, 0xfd, 0x82, 0x24, 0x2c, 0x00, 0x00, } func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { @@ -2629,6 +2630,11 @@ func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x1a i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) @@ -3338,6 +3344,8 @@ func (m *ValidationRule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -3813,6 +3821,7 @@ func (this *ValidationRule) String() string { s := strings.Join([]string{`&ValidationRule{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, `}`, }, "") return s @@ -8879,6 +8888,38 @@ func (m *ValidationRule) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto index d0b190fd..4632a83e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto +++ 
b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -107,12 +107,12 @@ message CustomResourceColumnDefinition { // CustomResourceConversion describes how to convert different versions of a CR. message CustomResourceConversion { // strategy specifies how custom resources are converted between versions. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // - `"None"`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `"Webhook"`: API Server will call to an external webhook to do the conversion. Additional information // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. optional string strategy = 1; - // webhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. + // webhook describes how to call the conversion webhook. Required when `strategy` is set to `"Webhook"`. // +optional optional WebhookConversion webhook = 2; } @@ -665,6 +665,19 @@ message ValidationRule { // If unset, the message is "failed rule: {Rule}". // e.g. "must be a URL with the host matching spec.host" optional string message = 2; + + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + optional string messageExpression = 3; } // WebhookClientConfig contains the information to make a TLS connection with the webhook. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go index 285058d7..59ec0e37 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go @@ -74,12 +74,12 @@ type CustomResourceDefinitionSpec struct { // CustomResourceConversion describes how to convert different versions of a CR. type CustomResourceConversion struct { // strategy specifies how custom resources are converted between versions. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. - // - `Webhook`: API Server will call to an external webhook to do the conversion. 
Additional information + // - `"None"`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `"Webhook"`: API Server will call to an external webhook to do the conversion. Additional information // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` - // webhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. + // webhook describes how to call the conversion webhook. Required when `strategy` is set to `"Webhook"`. // +optional Webhook *WebhookConversion `json:"webhook,omitempty" protobuf:"bytes,2,opt,name=webhook"` } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go index 277fd7a1..b348d0d1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go @@ -235,12 +235,24 @@ type ValidationRule struct { // If unset, the message is "failed rule: {Rule}". // e.g. "must be a URL with the host matching spec.host" Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,3,opt,name=messageExpression"` } // JSON represents any valid JSON value. // These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. 
type JSON struct { - Raw []byte `protobuf:"bytes,1,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` } // OpenAPISchemaType is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go index 95a58529..cde5275c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go @@ -1258,6 +1258,7 @@ func Convert_apiextensions_ServiceReference_To_v1_ServiceReference(in *apiextens func autoConvert_v1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { out.Rule = in.Rule out.Message = in.Message + out.MessageExpression = in.MessageExpression return nil } @@ -1269,6 +1270,7 @@ func Convert_v1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRul func autoConvert_apiextensions_ValidationRule_To_v1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { out.Rule = in.Rule out.Message = in.Message + out.MessageExpression = in.MessageExpression return nil } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index 6ab50066..011bd72d 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -785,200 +785,202 @@ func init() { } var fileDescriptor_98a4cc6918394e53 = []byte{ - // 3079 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcf, 0x73, 0x23, 0x47, - 0xf5, 0xdf, 0x91, 0x2d, 0x5b, 0x6e, 0xdb, 0x6b, 0xbb, 0x77, 0xed, 0xcc, 0x3a, 0x1b, 0xcb, 0x56, - 0xbe, 0xd9, 0xaf, 0x93, 0x6c, 0xe4, 0xc4, 0x24, 0x24, 0xa4, 0x48, 0x51, 0x96, 0xed, 0x0d, 0x4e, - 0xd6, 0x3f, 0x68, 0xed, 0x26, 0x86, 0xfc, 0x6c, 0x6b, 0xda, 0xf2, 0xc4, 0xa3, 0x99, 0xd9, 0xe9, - 0x19, 0xd9, 0xae, 0x00, 0xc5, 0x8f, 0x4a, 0x41, 0x51, 0x40, 0x28, 0x92, 0x0b, 0x05, 0x1c, 0x02, - 0xc5, 0x85, 0x03, 0x1c, 0xe0, 0x06, 0x7f, 0x40, 0x8e, 0x29, 0x8a, 0x43, 0x0e, 0x94, 0x20, 0xe2, - 0xca, 0x91, 0x2a, 0xaa, 0x7c, 0xa2, 0xfa, 0xc7, 0xf4, 0xb4, 0x46, 0xd2, 0xee, 0x56, 0x2c, 0x65, - 0xb9, 0x69, 0xde, 0xaf, 0xcf, 0x9b, 0xd7, 0xaf, 0x5f, 0xbf, 0x7e, 0x23, 0xb0, 0x7f, 0xf8, 0x0c, - 0x2d, 0xda, 0xde, 0xd2, 0x61, 0xb4, 0x47, 0x02, 0x97, 0x84, 0x84, 0x2e, 0xd5, 0x89, 0x6b, 0x79, - 0xc1, 0x92, 0x64, 0x60, 0xdf, 0x26, 0xc7, 0x21, 0x71, 0xa9, 0xed, 0xb9, 0xf4, 0x31, 0xec, 0xdb, - 0x94, 0x04, 0x75, 0x12, 0x2c, 0xf9, 0x87, 0x55, 0xc6, 0xa3, 0xad, 0x02, 0x4b, 0xf5, 0x27, 0xf6, - 0x48, 0x88, 0x9f, 0x58, 0xaa, 0x12, 0x97, 0x04, 0x38, 0x24, 0x56, 0xd1, 0x0f, 0xbc, 0xd0, 0x83, - 0xcf, 0x09, 0x73, 0xc5, 0x16, 0xe9, 0x37, 0x94, 0xb9, 0xa2, 0x7f, 0x58, 0x65, 0x3c, 0xda, 0x2a, - 0x50, 0x94, 0xe6, 0x66, 0x1f, 0xab, 0xda, 0xe1, 0x41, 0xb4, 0x57, 0xac, 0x78, 0xb5, 0xa5, 0xaa, - 0x57, 0xf5, 0x96, 0xb8, 0xd5, 0xbd, 0x68, 0x9f, 0x3f, 0xf1, 0x07, 0xfe, 0x4b, 0xa0, 0xcd, 0x3e, - 0x99, 0x38, 0x5f, 0xc3, 0x95, 0x03, 0xdb, 0x25, 0xc1, 0x49, 0xe2, 0x71, 0x8d, 0x84, 0x78, 0xa9, - 0xde, 0xe6, 0xe3, 0xec, 0x52, 0x37, 0xad, 0x20, 0x72, 0x43, 0xbb, 0x46, 0xda, 0x14, 0x3e, 0x7f, - 0x27, 0x05, 
0x5a, 0x39, 0x20, 0x35, 0x9c, 0xd6, 0x2b, 0x9c, 0x1a, 0x60, 0x6a, 0xd5, 0x73, 0xeb, - 0x24, 0x60, 0x6f, 0x89, 0xc8, 0xad, 0x88, 0xd0, 0x10, 0x96, 0xc0, 0x40, 0x64, 0x5b, 0xa6, 0x31, - 0x6f, 0x2c, 0x8e, 0x94, 0x1e, 0xff, 0xb0, 0x91, 0x3f, 0xd7, 0x6c, 0xe4, 0x07, 0x6e, 0x6e, 0xac, - 0x9d, 0x36, 0xf2, 0x0b, 0xdd, 0x90, 0xc2, 0x13, 0x9f, 0xd0, 0xe2, 0xcd, 0x8d, 0x35, 0xc4, 0x94, - 0xe1, 0xf3, 0x60, 0xca, 0x22, 0xd4, 0x0e, 0x88, 0xb5, 0xb2, 0xb3, 0xf1, 0x92, 0xb0, 0x6f, 0x66, - 0xb8, 0xc5, 0x4b, 0xd2, 0xe2, 0xd4, 0x5a, 0x5a, 0x00, 0xb5, 0xeb, 0xc0, 0x5d, 0x30, 0xec, 0xed, - 0xbd, 0x45, 0x2a, 0x21, 0x35, 0x07, 0xe6, 0x07, 0x16, 0x47, 0x97, 0x1f, 0x2b, 0x26, 0x2b, 0xa8, - 0x5c, 0xe0, 0xcb, 0x26, 0x5f, 0xb6, 0x88, 0xf0, 0xd1, 0x7a, 0xbc, 0x72, 0xa5, 0x09, 0x89, 0x36, - 0xbc, 0x2d, 0xac, 0xa0, 0xd8, 0x5c, 0xe1, 0xd7, 0x19, 0x00, 0xf5, 0x97, 0xa7, 0xbe, 0xe7, 0x52, - 0xd2, 0x93, 0xb7, 0xa7, 0x60, 0xb2, 0xc2, 0x2d, 0x87, 0xc4, 0x92, 0xb8, 0x66, 0xe6, 0xd3, 0x78, - 0x6f, 0x4a, 0xfc, 0xc9, 0xd5, 0x94, 0x39, 0xd4, 0x06, 0x00, 0x6f, 0x80, 0xa1, 0x80, 0xd0, 0xc8, - 0x09, 0xcd, 0x81, 0x79, 0x63, 0x71, 0x74, 0xf9, 0x6a, 0x57, 0x28, 0x9e, 0xdf, 0x2c, 0xf9, 0x8a, - 0xf5, 0x27, 0x8a, 0xe5, 0x10, 0x87, 0x11, 0x2d, 0x9d, 0x97, 0x48, 0x43, 0x88, 0xdb, 0x40, 0xd2, - 0x56, 0xe1, 0xfb, 0x19, 0x30, 0xa9, 0x47, 0xa9, 0x6e, 0x93, 0x23, 0x78, 0x04, 0x86, 0x03, 0x91, - 0x2c, 0x3c, 0x4e, 0xa3, 0xcb, 0x3b, 0xc5, 0x33, 0x6d, 0xab, 0x62, 0x5b, 0x12, 0x96, 0x46, 0xd9, - 0x9a, 0xc9, 0x07, 0x14, 0xa3, 0xc1, 0xb7, 0x41, 0x2e, 0x90, 0x0b, 0xc5, 0xb3, 0x69, 0x74, 0xf9, - 0x2b, 0x3d, 0x44, 0x16, 0x86, 0x4b, 0x63, 0xcd, 0x46, 0x3e, 0x17, 0x3f, 0x21, 0x05, 0x58, 0x78, - 0x2f, 0x03, 0xe6, 0x56, 0x23, 0x1a, 0x7a, 0x35, 0x44, 0xa8, 0x17, 0x05, 0x15, 0xb2, 0xea, 0x39, - 0x51, 0xcd, 0x5d, 0x23, 0xfb, 0xb6, 0x6b, 0x87, 0x2c, 0x5b, 0xe7, 0xc1, 0xa0, 0x8b, 0x6b, 0x44, - 0x66, 0xcf, 0x98, 0x8c, 0xe9, 0xe0, 0x16, 0xae, 0x11, 0xc4, 0x39, 0x4c, 0x82, 0x25, 0x8b, 0xdc, - 0x0b, 0x4a, 0xe2, 0xc6, 0x89, 0x4f, 0x10, 0xe7, 0xc0, 0x2b, 0x60, 0x68, 0xdf, 0x0b, 0x6a, 0x58, - 0xac, 0xe3, 0x48, 0xb2, 0x32, 0xd7, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x0a, 0x8c, 0x5a, 0x84, 0x56, - 0x02, 0xdb, 0x67, 0xd0, 0xe6, 0x20, 0x17, 0xbe, 0x20, 0x85, 0x47, 0xd7, 0x12, 0x16, 0xd2, 0xe5, - 0xe0, 0x55, 0x90, 0xf3, 0x03, 0xdb, 0x0b, 0xec, 0xf0, 0xc4, 0xcc, 0xce, 0x1b, 0x8b, 0xd9, 0xd2, - 0xa4, 0xd4, 0xc9, 0xed, 0x48, 0x3a, 0x52, 0x12, 0x70, 0x1e, 0xe4, 0x5e, 0x28, 0x6f, 0x6f, 0xed, - 0xe0, 0xf0, 0xc0, 0x1c, 0xe2, 0x08, 0x83, 0x4c, 0x1a, 0x29, 0x6a, 0xe1, 0x6f, 0x19, 0x60, 0xa6, - 0xa3, 0x12, 0x87, 0x14, 0x5e, 0x03, 0x39, 0x1a, 0xb2, 0x8a, 0x53, 0x3d, 0x91, 0x31, 0x79, 0x24, - 0x06, 0x2b, 0x4b, 0xfa, 0x69, 0x23, 0x3f, 0x93, 0x68, 0xc4, 0x54, 0x1e, 0x0f, 0xa5, 0x0b, 0x7f, - 0x69, 0x80, 0x0b, 0x47, 0x64, 0xef, 0xc0, 0xf3, 0x0e, 0x57, 0x1d, 0x9b, 0xb8, 0xe1, 0xaa, 0xe7, - 0xee, 0xdb, 0x55, 0x99, 0x03, 0xe8, 0x8c, 0x39, 0xf0, 0x72, 0xbb, 0xe5, 0xd2, 0x7d, 0xcd, 0x46, - 0xfe, 0x42, 0x07, 0x06, 0xea, 0xe4, 0x07, 0xdc, 0x05, 0x66, 0x25, 0xb5, 0x49, 0x64, 0x01, 0x13, - 0x65, 0x6b, 0xa4, 0x74, 0xb9, 0xd9, 0xc8, 0x9b, 0xab, 0x5d, 0x64, 0x50, 0x57, 0xed, 0xc2, 0x77, - 0x07, 0xd2, 0xe1, 0xd5, 0xd2, 0xed, 0x4d, 0x90, 0x63, 0xdb, 0xd8, 0xc2, 0x21, 0x96, 0x1b, 0xf1, - 0xf1, 0xbb, 0xdb, 0xf4, 0xa2, 0x66, 0x6c, 0x92, 0x10, 0x97, 0xa0, 0x5c, 0x10, 0x90, 0xd0, 0x90, - 0xb2, 0x0a, 0xbf, 0x01, 0x06, 0xa9, 0x4f, 0x2a, 0x32, 0xd0, 0xaf, 0x9c, 0x75, 0xb3, 0x75, 0x79, - 0x91, 0xb2, 0x4f, 0x2a, 0xc9, 0x5e, 0x60, 0x4f, 0x88, 0xc3, 0xc2, 0x77, 0x0c, 0x30, 0x44, 0x79, - 0x81, 0x92, 0x45, 0xed, 0xb5, 0x7e, 
0x79, 0x90, 0xaa, 0x82, 0xe2, 0x19, 0x49, 0xf0, 0xc2, 0xbf, - 0x33, 0x60, 0xa1, 0x9b, 0xea, 0xaa, 0xe7, 0x5a, 0x62, 0x39, 0x36, 0xe4, 0xde, 0x16, 0x99, 0xfe, - 0x94, 0xbe, 0xb7, 0x4f, 0x1b, 0xf9, 0x87, 0xee, 0x68, 0x40, 0x2b, 0x02, 0x5f, 0x50, 0xef, 0x2d, - 0x0a, 0xc5, 0x42, 0xab, 0x63, 0xa7, 0x8d, 0xfc, 0x84, 0x52, 0x6b, 0xf5, 0x15, 0xd6, 0x01, 0x74, - 0x30, 0x0d, 0x6f, 0x04, 0xd8, 0xa5, 0xc2, 0xac, 0x5d, 0x23, 0x32, 0x7c, 0x8f, 0xdc, 0x5d, 0x7a, - 0x30, 0x8d, 0xd2, 0xac, 0x84, 0x84, 0xd7, 0xdb, 0xac, 0xa1, 0x0e, 0x08, 0xac, 0x6e, 0x05, 0x04, - 0x53, 0x55, 0x8a, 0xb4, 0x13, 0x85, 0x51, 0x91, 0xe4, 0xc2, 0x87, 0xc1, 0x70, 0x8d, 0x50, 0x8a, - 0xab, 0x84, 0xd7, 0x9f, 0x91, 0xe4, 0x88, 0xde, 0x14, 0x64, 0x14, 0xf3, 0x59, 0x7f, 0x72, 0xb9, - 0x5b, 0xd4, 0xae, 0xdb, 0x34, 0x84, 0xaf, 0xb6, 0x6d, 0x80, 0xe2, 0xdd, 0xbd, 0x21, 0xd3, 0xe6, - 0xe9, 0xaf, 0x8a, 0x5f, 0x4c, 0xd1, 0x92, 0xff, 0xeb, 0x20, 0x6b, 0x87, 0xa4, 0x16, 0x9f, 0xdd, - 0x2f, 0xf7, 0x29, 0xf7, 0x4a, 0xe3, 0xd2, 0x87, 0xec, 0x06, 0x43, 0x43, 0x02, 0xb4, 0xf0, 0x9b, - 0x0c, 0x78, 0xa0, 0x9b, 0x0a, 0x3b, 0x50, 0x28, 0x8b, 0xb8, 0xef, 0x44, 0x01, 0x76, 0x64, 0xc6, - 0xa9, 0x88, 0xef, 0x70, 0x2a, 0x92, 0x5c, 0x56, 0xf2, 0xa9, 0xed, 0x56, 0x23, 0x07, 0x07, 0x32, - 0x9d, 0xd4, 0x5b, 0x97, 0x25, 0x1d, 0x29, 0x09, 0x58, 0x04, 0x80, 0x1e, 0x78, 0x41, 0xc8, 0x31, - 0x64, 0xf5, 0x3a, 0xcf, 0x0a, 0x44, 0x59, 0x51, 0x91, 0x26, 0xc1, 0x4e, 0xb4, 0x43, 0xdb, 0xb5, - 0xe4, 0xaa, 0xab, 0x5d, 0xfc, 0xa2, 0xed, 0x5a, 0x88, 0x73, 0x18, 0xbe, 0x63, 0xd3, 0x90, 0x51, - 0xe4, 0x92, 0xb7, 0x44, 0x9d, 0x4b, 0x2a, 0x09, 0x86, 0x5f, 0x61, 0x55, 0xdf, 0x0b, 0x6c, 0x42, - 0xcd, 0xa1, 0x04, 0x7f, 0x55, 0x51, 0x91, 0x26, 0x51, 0xf8, 0x57, 0xae, 0x7b, 0x92, 0xb0, 0x52, - 0x02, 0x1f, 0x04, 0xd9, 0x6a, 0xe0, 0x45, 0xbe, 0x8c, 0x92, 0x8a, 0xf6, 0xf3, 0x8c, 0x88, 0x04, - 0x8f, 0x65, 0x65, 0xbd, 0xa5, 0x4d, 0x55, 0x59, 0x19, 0x37, 0xa7, 0x31, 0x1f, 0x7e, 0xdb, 0x00, - 0x59, 0x57, 0x06, 0x87, 0xa5, 0xdc, 0xab, 0x7d, 0xca, 0x0b, 0x1e, 0xde, 0xc4, 0x5d, 0x11, 0x79, - 0x81, 0x0c, 0x9f, 0x04, 0x59, 0x5a, 0xf1, 0x7c, 0x22, 0xa3, 0x3e, 0x17, 0x0b, 0x95, 0x19, 0xf1, - 0xb4, 0x91, 0x1f, 0x8f, 0xcd, 0x71, 0x02, 0x12, 0xc2, 0xf0, 0x7b, 0x06, 0x00, 0x75, 0xec, 0xd8, - 0x16, 0xe6, 0x2d, 0x43, 0x96, 0xbb, 0xdf, 0xdb, 0xb4, 0x7e, 0x49, 0x99, 0x17, 0x8b, 0x96, 0x3c, - 0x23, 0x0d, 0x1a, 0xbe, 0x6b, 0x80, 0x31, 0x1a, 0xed, 0x05, 0x52, 0x8b, 0xf2, 0xe6, 0x62, 0x74, - 0xf9, 0xab, 0x3d, 0xf5, 0xa5, 0xac, 0x01, 0x94, 0x26, 0x9b, 0x8d, 0xfc, 0x98, 0x4e, 0x41, 0x2d, - 0x0e, 0xc0, 0x1f, 0x1a, 0x20, 0x57, 0x8f, 0xcf, 0xec, 0x61, 0xbe, 0xe1, 0x5f, 0xef, 0xd3, 0xc2, - 0xca, 0x8c, 0x4a, 0x76, 0x81, 0xea, 0x03, 0x94, 0x07, 0xf0, 0x4f, 0x06, 0x30, 0xb1, 0x25, 0x0a, - 0x3c, 0x76, 0x76, 0x02, 0xdb, 0x0d, 0x49, 0x20, 0xfa, 0x4d, 0x6a, 0xe6, 0xb8, 0x7b, 0xbd, 0x3d, - 0x0b, 0xd3, 0xbd, 0x6c, 0x69, 0x5e, 0x7a, 0x67, 0xae, 0x74, 0x71, 0x03, 0x75, 0x75, 0x90, 0x27, - 0x5a, 0xd2, 0xd2, 0x98, 0x23, 0x7d, 0x48, 0xb4, 0xa4, 0x97, 0x92, 0xd5, 0x21, 0xe9, 0xa0, 0x34, - 0x68, 0xb8, 0x0d, 0xa6, 0xfd, 0x80, 0x70, 0x80, 0x9b, 0xee, 0xa1, 0xeb, 0x1d, 0xb9, 0xd7, 0x6c, - 0xe2, 0x58, 0xd4, 0x04, 0xf3, 0xc6, 0x62, 0xae, 0x74, 0xa9, 0xd9, 0xc8, 0x4f, 0xef, 0x74, 0x12, - 0x40, 0x9d, 0xf5, 0x0a, 0xef, 0x0e, 0xa4, 0x6f, 0x01, 0xe9, 0x2e, 0x02, 0xbe, 0x2f, 0xde, 0x5e, - 0xc4, 0x86, 0x9a, 0x06, 0x5f, 0xad, 0x37, 0xfb, 0x94, 0x4c, 0xaa, 0x0d, 0x48, 0x3a, 0x39, 0x45, - 0xa2, 0x48, 0xf3, 0x03, 0xfe, 0xcc, 0x00, 0xe3, 0xb8, 0x52, 0x21, 0x7e, 0x48, 0x2c, 0x51, 0xdc, - 0x33, 0x9f, 0x41, 0xfd, 0x9a, 0x96, 0x5e, 0x8d, 0xaf, 0xe8, 
0xd0, 0xa8, 0xd5, 0x13, 0xf8, 0x2c, - 0x38, 0x4f, 0x43, 0x2f, 0x20, 0x56, 0xaa, 0x6d, 0x86, 0xcd, 0x46, 0xfe, 0x7c, 0xb9, 0x85, 0x83, - 0x52, 0x92, 0x85, 0xbf, 0x67, 0x41, 0xfe, 0x0e, 0x5b, 0xed, 0x2e, 0x2e, 0x66, 0x57, 0xc0, 0x10, - 0x7f, 0x5d, 0x8b, 0x47, 0x25, 0xa7, 0xb5, 0x82, 0x9c, 0x8a, 0x24, 0x97, 0x1d, 0x14, 0x0c, 0x9f, - 0xb5, 0x2f, 0x03, 0x5c, 0x50, 0x1d, 0x14, 0x65, 0x41, 0x46, 0x31, 0x1f, 0x2e, 0x03, 0x60, 0x11, - 0x3f, 0x20, 0xec, 0xb0, 0xb2, 0xcc, 0x61, 0x2e, 0xad, 0x16, 0x69, 0x4d, 0x71, 0x90, 0x26, 0x05, - 0xaf, 0x01, 0x18, 0x3f, 0xd9, 0x9e, 0xfb, 0x32, 0x0e, 0x5c, 0xdb, 0xad, 0x9a, 0x39, 0xee, 0xf6, - 0x0c, 0xeb, 0xc6, 0xd6, 0xda, 0xb8, 0xa8, 0x83, 0x06, 0x7c, 0x1b, 0x0c, 0x89, 0xa1, 0x0f, 0x3f, - 0x21, 0xfa, 0x58, 0xe5, 0x01, 0x8f, 0x11, 0x87, 0x42, 0x12, 0xb2, 0xbd, 0xba, 0x67, 0xef, 0x75, - 0x75, 0xbf, 0x6d, 0x39, 0x1d, 0xfa, 0x1f, 0x2f, 0xa7, 0x85, 0xff, 0x18, 0xe9, 0x9a, 0xa3, 0xbd, - 0x6a, 0xb9, 0x82, 0x1d, 0x02, 0xd7, 0xc0, 0x24, 0xbb, 0x31, 0x21, 0xe2, 0x3b, 0x76, 0x05, 0x53, - 0x7e, 0x61, 0x17, 0xc9, 0xae, 0x66, 0x48, 0xe5, 0x14, 0x1f, 0xb5, 0x69, 0xc0, 0x17, 0x00, 0x14, - 0xb7, 0x88, 0x16, 0x3b, 0xa2, 0x21, 0x52, 0xf7, 0x81, 0x72, 0x9b, 0x04, 0xea, 0xa0, 0x05, 0x57, - 0xc1, 0x94, 0x83, 0xf7, 0x88, 0x53, 0x26, 0x0e, 0xa9, 0x84, 0x5e, 0xc0, 0x4d, 0x89, 0x91, 0xc6, - 0x74, 0xb3, 0x91, 0x9f, 0xba, 0x9e, 0x66, 0xa2, 0x76, 0xf9, 0xc2, 0x42, 0x7a, 0x6b, 0xeb, 0x2f, - 0x2e, 0xee, 0x66, 0x1f, 0x64, 0xc0, 0x6c, 0xf7, 0xcc, 0x80, 0xdf, 0x49, 0xae, 0x90, 0xe2, 0x86, - 0xf0, 0x7a, 0xbf, 0xb2, 0x50, 0xde, 0x21, 0x41, 0xfb, 0xfd, 0x11, 0x7e, 0x93, 0xb5, 0x6b, 0xd8, - 0x89, 0x87, 0x56, 0xaf, 0xf5, 0xcd, 0x05, 0x06, 0x52, 0x1a, 0x11, 0x9d, 0x20, 0x76, 0x78, 0xe3, - 0x87, 0x1d, 0x52, 0xf8, 0xad, 0x91, 0x9e, 0x22, 0x24, 0x3b, 0x18, 0xfe, 0xc8, 0x00, 0x13, 0x9e, - 0x4f, 0xdc, 0x95, 0x9d, 0x8d, 0x97, 0x3e, 0x27, 0x76, 0xb2, 0x0c, 0xd5, 0xd6, 0x19, 0xfd, 0x7c, - 0xa1, 0xbc, 0xbd, 0x25, 0x0c, 0xee, 0x04, 0x9e, 0x4f, 0x4b, 0x17, 0x9a, 0x8d, 0xfc, 0xc4, 0x76, - 0x2b, 0x14, 0x4a, 0x63, 0x17, 0x6a, 0x60, 0x7a, 0xfd, 0x38, 0x24, 0x81, 0x8b, 0x9d, 0x35, 0xaf, - 0x12, 0xd5, 0x88, 0x1b, 0x0a, 0x47, 0x53, 0x13, 0x2f, 0xe3, 0x2e, 0x27, 0x5e, 0x0f, 0x80, 0x81, - 0x28, 0x70, 0x64, 0x16, 0x8f, 0xaa, 0x89, 0x2e, 0xba, 0x8e, 0x18, 0xbd, 0xb0, 0x00, 0x06, 0x99, - 0x9f, 0xf0, 0x12, 0x18, 0x08, 0xf0, 0x11, 0xb7, 0x3a, 0x56, 0x1a, 0x66, 0x22, 0x08, 0x1f, 0x21, - 0x46, 0x2b, 0xfc, 0x75, 0x01, 0x4c, 0xa4, 0xde, 0x05, 0xce, 0x82, 0x8c, 0x1a, 0x13, 0x03, 0x69, - 0x34, 0xb3, 0xb1, 0x86, 0x32, 0xb6, 0x05, 0x9f, 0x56, 0xc5, 0x57, 0x80, 0xe6, 0xd5, 0x59, 0xc2, - 0xa9, 0xac, 0x3f, 0x4f, 0xcc, 0x31, 0x47, 0xe2, 0xc2, 0xc9, 0x7c, 0x20, 0xfb, 0x72, 0x97, 0x08, - 0x1f, 0xc8, 0x3e, 0x62, 0xb4, 0x4f, 0x3b, 0xee, 0x8b, 0xe7, 0x8d, 0xd9, 0xbb, 0x98, 0x37, 0x0e, - 0xdd, 0x76, 0xde, 0xf8, 0x20, 0xc8, 0x86, 0x76, 0xe8, 0x10, 0x7e, 0x90, 0x69, 0xd7, 0xa8, 0x1b, - 0x8c, 0x88, 0x04, 0x0f, 0xbe, 0x05, 0x86, 0x2d, 0xb2, 0x8f, 0x23, 0x27, 0xe4, 0x67, 0xd6, 0xe8, - 0xf2, 0x6a, 0x0f, 0x52, 0x48, 0x0c, 0x83, 0xd7, 0x84, 0x5d, 0x14, 0x03, 0xc0, 0x87, 0xc0, 0x70, - 0x0d, 0x1f, 0xdb, 0xb5, 0xa8, 0xc6, 0x1b, 0x4c, 0x43, 0x88, 0x6d, 0x0a, 0x12, 0x8a, 0x79, 0xac, - 0x32, 0x92, 0xe3, 0x8a, 0x13, 0x51, 0xbb, 0x4e, 0x24, 0x53, 0x36, 0x7f, 0xaa, 0x32, 0xae, 0xa7, - 0xf8, 0xa8, 0x4d, 0x83, 0x83, 0xd9, 0x2e, 0x57, 0x1e, 0xd5, 0xc0, 0x04, 0x09, 0xc5, 0xbc, 0x56, - 0x30, 0x29, 0x3f, 0xd6, 0x0d, 0x4c, 0x2a, 0xb7, 0x69, 0xc0, 0x47, 0xc1, 0x48, 0x0d, 0x1f, 0x5f, - 0x27, 0x6e, 0x35, 0x3c, 0x30, 0xc7, 0xe7, 0x8d, 0xc5, 0x81, 0xd2, 0x78, 0xb3, 0x91, 
0x1f, 0xd9, - 0x8c, 0x89, 0x28, 0xe1, 0x73, 0x61, 0xdb, 0x95, 0xc2, 0xe7, 0x35, 0xe1, 0x98, 0x88, 0x12, 0x3e, - 0xeb, 0x5e, 0x7c, 0x1c, 0xb2, 0xcd, 0x65, 0x4e, 0xb4, 0x5e, 0x73, 0x77, 0x04, 0x19, 0xc5, 0x7c, - 0xb8, 0x08, 0x72, 0x35, 0x7c, 0xcc, 0x47, 0x12, 0xe6, 0x24, 0x37, 0xcb, 0x07, 0xe3, 0x9b, 0x92, - 0x86, 0x14, 0x97, 0x4b, 0xda, 0xae, 0x90, 0x9c, 0xd2, 0x24, 0x25, 0x0d, 0x29, 0x2e, 0x4b, 0xe2, - 0xc8, 0xb5, 0x6f, 0x45, 0x44, 0x08, 0x43, 0x1e, 0x19, 0x95, 0xc4, 0x37, 0x13, 0x16, 0xd2, 0xe5, - 0x60, 0x11, 0x80, 0x5a, 0xe4, 0x84, 0xb6, 0xef, 0x90, 0xed, 0x7d, 0xf3, 0x02, 0x8f, 0x3f, 0x6f, - 0xfa, 0x37, 0x15, 0x15, 0x69, 0x12, 0x90, 0x80, 0x41, 0xe2, 0x46, 0x35, 0xf3, 0x22, 0x3f, 0xd8, - 0x7b, 0x92, 0x82, 0x6a, 0xe7, 0xac, 0xbb, 0x51, 0x0d, 0x71, 0xf3, 0xf0, 0x69, 0x30, 0x5e, 0xc3, - 0xc7, 0xac, 0x1c, 0x90, 0x20, 0xb4, 0x09, 0x35, 0xa7, 0xf9, 0xcb, 0x4f, 0xb1, 0x6e, 0x77, 0x53, - 0x67, 0xa0, 0x56, 0x39, 0xae, 0x68, 0xbb, 0x9a, 0xe2, 0x8c, 0xa6, 0xa8, 0x33, 0x50, 0xab, 0x1c, - 0x8b, 0x74, 0x40, 0x6e, 0x45, 0x76, 0x40, 0x2c, 0xf3, 0x3e, 0xde, 0x20, 0xcb, 0x8f, 0x15, 0x82, - 0x86, 0x14, 0x17, 0xd6, 0xe3, 0xd9, 0x95, 0xc9, 0xb7, 0xe1, 0xcd, 0xde, 0x56, 0xf2, 0xed, 0x60, - 0x25, 0x08, 0xf0, 0x89, 0x38, 0x69, 0xf4, 0xa9, 0x15, 0xa4, 0x20, 0x8b, 0x1d, 0x67, 0x7b, 0xdf, - 0xbc, 0xc4, 0x63, 0xdf, 0xeb, 0x13, 0x44, 0x55, 0x9d, 0x15, 0x06, 0x82, 0x04, 0x16, 0x03, 0xf5, - 0x5c, 0x96, 0x1a, 0xb3, 0xfd, 0x05, 0xdd, 0x66, 0x20, 0x48, 0x60, 0xf1, 0x37, 0x75, 0x4f, 0xb6, - 0xf7, 0xcd, 0xfb, 0xfb, 0xfc, 0xa6, 0x0c, 0x04, 0x09, 0x2c, 0x68, 0x83, 0x01, 0xd7, 0x0b, 0xcd, - 0xcb, 0x7d, 0x39, 0x9e, 0xf9, 0x81, 0xb3, 0xe5, 0x85, 0x88, 0x61, 0xc0, 0x9f, 0x1a, 0x00, 0xf8, - 0x49, 0x8a, 0x3e, 0xd0, 0x93, 0x91, 0x48, 0x0a, 0xb2, 0x98, 0xe4, 0xf6, 0xba, 0x1b, 0x06, 0x27, - 0xc9, 0xf5, 0x48, 0xdb, 0x03, 0x9a, 0x17, 0xf0, 0x57, 0x06, 0xb8, 0xa8, 0xb7, 0xc9, 0xca, 0xbd, - 0x39, 0x1e, 0x91, 0x1b, 0xbd, 0x4e, 0xf3, 0x92, 0xe7, 0x39, 0x25, 0xb3, 0xd9, 0xc8, 0x5f, 0x5c, - 0xe9, 0x80, 0x8a, 0x3a, 0xfa, 0x02, 0x7f, 0x67, 0x80, 0x29, 0x59, 0x45, 0x35, 0x0f, 0xf3, 0x3c, - 0x80, 0xa4, 0xd7, 0x01, 0x4c, 0xe3, 0x88, 0x38, 0xaa, 0x8f, 0xec, 0x6d, 0x7c, 0xd4, 0xee, 0x1a, - 0xfc, 0xa3, 0x01, 0xc6, 0x2c, 0xe2, 0x13, 0xd7, 0x22, 0x6e, 0x85, 0xf9, 0x3a, 0xdf, 0x93, 0x91, - 0x45, 0xda, 0xd7, 0x35, 0x0d, 0x42, 0xb8, 0x59, 0x94, 0x6e, 0x8e, 0xe9, 0xac, 0xd3, 0x46, 0x7e, - 0x26, 0x51, 0xd5, 0x39, 0xa8, 0xc5, 0x4b, 0xf8, 0x9e, 0x01, 0x26, 0x92, 0x05, 0x10, 0x47, 0xca, - 0x42, 0x1f, 0xf3, 0x80, 0xb7, 0xaf, 0x2b, 0xad, 0x80, 0x28, 0xed, 0x01, 0xfc, 0xbd, 0xc1, 0x3a, - 0xb5, 0xf8, 0xde, 0x47, 0xcd, 0x02, 0x8f, 0xe5, 0x1b, 0x3d, 0x8f, 0xa5, 0x42, 0x10, 0xa1, 0xbc, - 0x9a, 0xb4, 0x82, 0x8a, 0x73, 0xda, 0xc8, 0x4f, 0xeb, 0x91, 0x54, 0x0c, 0xa4, 0x7b, 0x08, 0x7f, - 0x60, 0x80, 0x31, 0x92, 0x74, 0xdc, 0xd4, 0x7c, 0xb0, 0x27, 0x41, 0xec, 0xd8, 0xc4, 0x8b, 0x9b, - 0xba, 0xc6, 0xa2, 0xa8, 0x05, 0x9b, 0x75, 0x90, 0xe4, 0x18, 0xd7, 0x7c, 0x87, 0x98, 0xff, 0xd7, - 0xe3, 0x0e, 0x72, 0x5d, 0xd8, 0x45, 0x31, 0x00, 0xbc, 0x0a, 0x72, 0x6e, 0xe4, 0x38, 0x78, 0xcf, - 0x21, 0xe6, 0x43, 0xbc, 0x17, 0x51, 0x23, 0xd9, 0x2d, 0x49, 0x47, 0x4a, 0x02, 0xee, 0x83, 0xf9, - 0xe3, 0x17, 0xd5, 0xdf, 0x93, 0x3a, 0x0e, 0x0d, 0xcd, 0x2b, 0xdc, 0xca, 0x6c, 0xb3, 0x91, 0x9f, - 0xd9, 0xed, 0x3c, 0x56, 0xbc, 0xa3, 0x0d, 0xf8, 0x0a, 0xb8, 0x5f, 0x93, 0x59, 0xaf, 0xed, 0x11, - 0xcb, 0x22, 0x56, 0x7c, 0x71, 0x33, 0xff, 0x5f, 0x0c, 0x2e, 0xe3, 0x0d, 0xbe, 0x9b, 0x16, 0x40, - 0xb7, 0xd3, 0x86, 0xd7, 0xc1, 0x8c, 0xc6, 0xde, 0x70, 0xc3, 0xed, 0xa0, 0x1c, 0x06, 0xb6, 0x5b, - 0x35, 0x17, 
0xb9, 0xdd, 0x8b, 0xf1, 0x8e, 0xdc, 0xd5, 0x78, 0xa8, 0x8b, 0x0e, 0xfc, 0x72, 0x8b, - 0x35, 0xfe, 0x09, 0x0d, 0xfb, 0x2f, 0x92, 0x13, 0x6a, 0x3e, 0xcc, 0xbb, 0x13, 0xbe, 0xd8, 0xbb, - 0x1a, 0x1d, 0x75, 0x91, 0x87, 0x5f, 0x02, 0x17, 0x52, 0x1c, 0x76, 0x45, 0x31, 0x1f, 0x11, 0x77, - 0x0d, 0xd6, 0xcf, 0xee, 0xc6, 0x44, 0xd4, 0x49, 0x12, 0x7e, 0x11, 0x40, 0x8d, 0xbc, 0x89, 0x7d, - 0xae, 0xff, 0xa8, 0xb8, 0xf6, 0xb0, 0x15, 0xdd, 0x95, 0x34, 0xd4, 0x41, 0x0e, 0xfe, 0xdc, 0x68, - 0x79, 0x93, 0xe4, 0x76, 0x4c, 0xcd, 0xab, 0x7c, 0xff, 0x6e, 0x9e, 0x31, 0x0b, 0xb5, 0xef, 0x20, - 0x91, 0x43, 0xb4, 0x30, 0x6b, 0x50, 0xa8, 0x8b, 0x0b, 0xb3, 0xec, 0x86, 0x9e, 0xaa, 0xf0, 0x70, - 0x12, 0x0c, 0x1c, 0x12, 0xf9, 0xaf, 0x0a, 0xc4, 0x7e, 0x42, 0x0b, 0x64, 0xeb, 0xd8, 0x89, 0xe2, - 0x21, 0x43, 0x8f, 0xbb, 0x03, 0x24, 0x8c, 0x3f, 0x9b, 0x79, 0xc6, 0x98, 0x7d, 0xdf, 0x00, 0x33, - 0x9d, 0x0f, 0x9e, 0x7b, 0xea, 0xd6, 0x2f, 0x0c, 0x30, 0xd5, 0x76, 0xc6, 0x74, 0xf0, 0xe8, 0x56, - 0xab, 0x47, 0xaf, 0xf4, 0xfa, 0xb0, 0x10, 0x9b, 0x83, 0x77, 0xc8, 0xba, 0x7b, 0x3f, 0x36, 0xc0, - 0x64, 0xba, 0x6c, 0xdf, 0xcb, 0x78, 0x15, 0xde, 0xcf, 0x80, 0x99, 0xce, 0x8d, 0x3d, 0x0c, 0xd4, - 0x04, 0xa3, 0x3f, 0x93, 0xa0, 0x4e, 0x53, 0xe3, 0x77, 0x0c, 0x30, 0xfa, 0x96, 0x92, 0x8b, 0xbf, - 0xba, 0xf7, 0x7c, 0x06, 0x15, 0x9f, 0x93, 0x09, 0x83, 0x22, 0x1d, 0xb7, 0xf0, 0x07, 0x03, 0x4c, - 0x77, 0x6c, 0x00, 0xe0, 0x15, 0x30, 0x84, 0x1d, 0xc7, 0x3b, 0x12, 0xa3, 0x44, 0xed, 0x1b, 0xc1, - 0x0a, 0xa7, 0x22, 0xc9, 0xd5, 0xa2, 0x97, 0xf9, 0xac, 0xa2, 0x57, 0xf8, 0xb3, 0x01, 0x2e, 0xdf, - 0x2e, 0x13, 0xef, 0xc9, 0x92, 0x2e, 0x82, 0x9c, 0x6c, 0xde, 0x4f, 0xf8, 0x72, 0xca, 0x52, 0x2c, - 0x8b, 0x06, 0xff, 0xa3, 0x99, 0xf8, 0x55, 0xf8, 0xc0, 0x00, 0x93, 0x65, 0x12, 0xd4, 0xed, 0x0a, - 0x41, 0x64, 0x9f, 0x04, 0xc4, 0xad, 0x10, 0xb8, 0x04, 0x46, 0xf8, 0xe7, 0x6e, 0x1f, 0x57, 0xe2, - 0x4f, 0x37, 0x53, 0x32, 0xe4, 0x23, 0x5b, 0x31, 0x03, 0x25, 0x32, 0xea, 0x33, 0x4f, 0xa6, 0xeb, - 0x67, 0x9e, 0xcb, 0x60, 0xd0, 0x4f, 0x06, 0xd1, 0x39, 0xc6, 0xe5, 0xb3, 0x67, 0x4e, 0xe5, 0x5c, - 0x2f, 0x08, 0xf9, 0x74, 0x2d, 0x2b, 0xb9, 0x5e, 0x10, 0x22, 0x4e, 0x2d, 0xbc, 0x06, 0xce, 0xb7, - 0x96, 0x71, 0x86, 0x17, 0x44, 0x4e, 0xdb, 0x67, 0x25, 0xc6, 0x43, 0x9c, 0xa3, 0xff, 0xdb, 0x25, - 0x73, 0x87, 0x7f, 0xbb, 0xfc, 0xc5, 0x00, 0x9d, 0xfe, 0x71, 0x06, 0x2f, 0x89, 0xf9, 0xa5, 0x36, - 0x14, 0x8c, 0x67, 0x97, 0xb0, 0x0e, 0x86, 0xa9, 0x08, 0x9a, 0x5c, 0xd4, 0xed, 0x33, 0x2e, 0x6a, - 0x7a, 0x09, 0x44, 0xe3, 0x14, 0x53, 0x63, 0x30, 0xb6, 0xae, 0x15, 0x5c, 0x8a, 0x5c, 0x4b, 0x8e, - 0xb4, 0xc7, 0xc4, 0xba, 0xae, 0xae, 0x08, 0x1a, 0x52, 0xdc, 0x52, 0xe5, 0xc3, 0x4f, 0xe6, 0xce, - 0x7d, 0xf4, 0xc9, 0xdc, 0xb9, 0x8f, 0x3f, 0x99, 0x3b, 0xf7, 0xad, 0xe6, 0x9c, 0xf1, 0x61, 0x73, - 0xce, 0xf8, 0xa8, 0x39, 0x67, 0x7c, 0xdc, 0x9c, 0x33, 0xfe, 0xd1, 0x9c, 0x33, 0x7e, 0xf2, 0xcf, - 0xb9, 0x73, 0x5f, 0x7b, 0xee, 0x4c, 0x7f, 0xf2, 0xfe, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, - 0x02, 0xc9, 0x93, 0x3d, 0x2e, 0x00, 0x00, + // 3105 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0x23, 0x47, + 0xd9, 0xdf, 0x91, 0x2c, 0x5b, 0x6e, 0x7b, 0x77, 0xed, 0xde, 0xb5, 0x33, 0xeb, 0x6c, 0x2c, 0x5b, + 0x79, 0xb3, 0xaf, 0x93, 0x6c, 0xe4, 0xc4, 0x6f, 0xf2, 0x26, 0xa4, 0x48, 0x51, 0x96, 0xed, 0x0d, + 0x4e, 0xd6, 0x1f, 0xb4, 0x76, 0x13, 0x43, 0x3e, 0xc7, 0x9a, 0xb6, 0x3c, 0xf1, 0x68, 0x66, 0xb6, + 0x7b, 0x46, 0xb6, 0x2b, 0x40, 0xf1, 0x51, 0x29, 0x28, 0x0a, 0x08, 0x45, 0x72, 0xa1, 0x80, 0x43, + 0xa0, 0xe0, 0xc0, 0x01, 0x0e, 0x70, 0x83, 
0x3f, 0x20, 0xc7, 0x14, 0xc5, 0x21, 0x07, 0x4a, 0x10, + 0x71, 0xe5, 0x48, 0x15, 0x55, 0x3e, 0x51, 0xfd, 0x31, 0x3d, 0xad, 0x91, 0xb4, 0xbb, 0x15, 0x4b, + 0x59, 0x6e, 0xd2, 0xf3, 0xf5, 0x7b, 0xe6, 0xe9, 0xa7, 0x9f, 0x7e, 0xfa, 0x99, 0x01, 0x7b, 0x07, + 0xcf, 0xd0, 0x92, 0xe3, 0x2f, 0x1e, 0x44, 0xbb, 0x98, 0x78, 0x38, 0xc4, 0x74, 0xb1, 0x81, 0x3d, + 0xdb, 0x27, 0x8b, 0x92, 0x61, 0x05, 0x0e, 0x3e, 0x0a, 0xb1, 0x47, 0x1d, 0xdf, 0xa3, 0x8f, 0x59, + 0x81, 0x43, 0x31, 0x69, 0x60, 0xb2, 0x18, 0x1c, 0xd4, 0x18, 0x8f, 0xb6, 0x0b, 0x2c, 0x36, 0x9e, + 0xd8, 0xc5, 0xa1, 0xf5, 0xc4, 0x62, 0x0d, 0x7b, 0x98, 0x58, 0x21, 0xb6, 0x4b, 0x01, 0xf1, 0x43, + 0x1f, 0x3e, 0x27, 0xcc, 0x95, 0xda, 0xa4, 0xdf, 0x50, 0xe6, 0x4a, 0xc1, 0x41, 0x8d, 0xf1, 0x68, + 0xbb, 0x40, 0x49, 0x9a, 0x9b, 0x79, 0xac, 0xe6, 0x84, 0xfb, 0xd1, 0x6e, 0xa9, 0xea, 0xd7, 0x17, + 0x6b, 0x7e, 0xcd, 0x5f, 0xe4, 0x56, 0x77, 0xa3, 0x3d, 0xfe, 0x8f, 0xff, 0xe1, 0xbf, 0x04, 0xda, + 0xcc, 0x93, 0x89, 0xf3, 0x75, 0xab, 0xba, 0xef, 0x78, 0x98, 0x1c, 0x27, 0x1e, 0xd7, 0x71, 0x68, + 0x2d, 0x36, 0x3a, 0x7c, 0x9c, 0x59, 0xec, 0xa5, 0x45, 0x22, 0x2f, 0x74, 0xea, 0xb8, 0x43, 0xe1, + 0xff, 0xef, 0xa4, 0x40, 0xab, 0xfb, 0xb8, 0x6e, 0xa5, 0xf5, 0x8a, 0x27, 0x06, 0x98, 0x5c, 0xf1, + 0xbd, 0x06, 0x26, 0xec, 0x29, 0x11, 0xbe, 0x15, 0x61, 0x1a, 0xc2, 0x32, 0xc8, 0x46, 0x8e, 0x6d, + 0x1a, 0x73, 0xc6, 0xc2, 0x68, 0xf9, 0xf1, 0x0f, 0x9b, 0x85, 0x33, 0xad, 0x66, 0x21, 0x7b, 0x73, + 0x7d, 0xf5, 0xa4, 0x59, 0x98, 0xef, 0x85, 0x14, 0x1e, 0x07, 0x98, 0x96, 0x6e, 0xae, 0xaf, 0x22, + 0xa6, 0x0c, 0x9f, 0x07, 0x93, 0x36, 0xa6, 0x0e, 0xc1, 0xf6, 0xf2, 0xf6, 0xfa, 0x4b, 0xc2, 0xbe, + 0x99, 0xe1, 0x16, 0x2f, 0x49, 0x8b, 0x93, 0xab, 0x69, 0x01, 0xd4, 0xa9, 0x03, 0x77, 0xc0, 0x88, + 0xbf, 0xfb, 0x16, 0xae, 0x86, 0xd4, 0xcc, 0xce, 0x65, 0x17, 0xc6, 0x96, 0x1e, 0x2b, 0x25, 0x2b, + 0xa8, 0x5c, 0xe0, 0xcb, 0x26, 0x1f, 0xb6, 0x84, 0xac, 0xc3, 0xb5, 0x78, 0xe5, 0xca, 0xe7, 0x25, + 0xda, 0xc8, 0x96, 0xb0, 0x82, 0x62, 0x73, 0xc5, 0x5f, 0x66, 0x00, 0xd4, 0x1f, 0x9e, 0x06, 0xbe, + 0x47, 0x71, 0x5f, 0x9e, 0x9e, 0x82, 0x89, 0x2a, 0xb7, 0x1c, 0x62, 0x5b, 0xe2, 0x9a, 0x99, 0x4f, + 0xe3, 0xbd, 0x29, 0xf1, 0x27, 0x56, 0x52, 0xe6, 0x50, 0x07, 0x00, 0xbc, 0x01, 0x86, 0x09, 0xa6, + 0x91, 0x1b, 0x9a, 0xd9, 0x39, 0x63, 0x61, 0x6c, 0xe9, 0x6a, 0x4f, 0x28, 0x9e, 0xdf, 0x2c, 0xf9, + 0x4a, 0x8d, 0x27, 0x4a, 0x95, 0xd0, 0x0a, 0x23, 0x5a, 0x3e, 0x27, 0x91, 0x86, 0x11, 0xb7, 0x81, + 0xa4, 0xad, 0xe2, 0x77, 0x33, 0x60, 0x42, 0x8f, 0x52, 0xc3, 0xc1, 0x87, 0xf0, 0x10, 0x8c, 0x10, + 0x91, 0x2c, 0x3c, 0x4e, 0x63, 0x4b, 0xdb, 0xa5, 0x53, 0x6d, 0xab, 0x52, 0x47, 0x12, 0x96, 0xc7, + 0xd8, 0x9a, 0xc9, 0x3f, 0x28, 0x46, 0x83, 0x6f, 0x83, 0x3c, 0x91, 0x0b, 0xc5, 0xb3, 0x69, 0x6c, + 0xe9, 0x4b, 0x7d, 0x44, 0x16, 0x86, 0xcb, 0xe3, 0xad, 0x66, 0x21, 0x1f, 0xff, 0x43, 0x0a, 0xb0, + 0xf8, 0x5e, 0x06, 0xcc, 0xae, 0x44, 0x34, 0xf4, 0xeb, 0x08, 0x53, 0x3f, 0x22, 0x55, 0xbc, 0xe2, + 0xbb, 0x51, 0xdd, 0x5b, 0xc5, 0x7b, 0x8e, 0xe7, 0x84, 0x2c, 0x5b, 0xe7, 0xc0, 0x90, 0x67, 0xd5, + 0xb1, 0xcc, 0x9e, 0x71, 0x19, 0xd3, 0xa1, 0x4d, 0xab, 0x8e, 0x11, 0xe7, 0x30, 0x09, 0x96, 0x2c, + 0x72, 0x2f, 0x28, 0x89, 0x1b, 0xc7, 0x01, 0x46, 0x9c, 0x03, 0xaf, 0x80, 0xe1, 0x3d, 0x9f, 0xd4, + 0x2d, 0xb1, 0x8e, 0xa3, 0xc9, 0xca, 0x5c, 0xe3, 0x54, 0x24, 0xb9, 0xf0, 0x29, 0x30, 0x66, 0x63, + 0x5a, 0x25, 0x4e, 0xc0, 0xa0, 0xcd, 0x21, 0x2e, 0x7c, 0x41, 0x0a, 0x8f, 0xad, 0x26, 0x2c, 0xa4, + 0xcb, 0xc1, 0xab, 0x20, 0x1f, 0x10, 0xc7, 0x27, 0x4e, 0x78, 0x6c, 0xe6, 0xe6, 0x8c, 0x85, 0x5c, + 0x79, 0x42, 0xea, 0xe4, 0xb7, 0x25, 0x1d, 0x29, 0x09, 0x38, 0x07, 
0xf2, 0x2f, 0x54, 0xb6, 0x36, + 0xb7, 0xad, 0x70, 0xdf, 0x1c, 0xe6, 0x08, 0x43, 0x4c, 0x1a, 0x29, 0x6a, 0xf1, 0xaf, 0x19, 0x60, + 0xa6, 0xa3, 0x12, 0x87, 0x14, 0x5e, 0x03, 0x79, 0x1a, 0xb2, 0x8a, 0x53, 0x3b, 0x96, 0x31, 0x79, + 0x24, 0x06, 0xab, 0x48, 0xfa, 0x49, 0xb3, 0x30, 0x9d, 0x68, 0xc4, 0x54, 0x1e, 0x0f, 0xa5, 0x0b, + 0x7f, 0x6e, 0x80, 0x0b, 0x87, 0x78, 0x77, 0xdf, 0xf7, 0x0f, 0x56, 0x5c, 0x07, 0x7b, 0xe1, 0x8a, + 0xef, 0xed, 0x39, 0x35, 0x99, 0x03, 0xe8, 0x94, 0x39, 0xf0, 0x72, 0xa7, 0xe5, 0xf2, 0x7d, 0xad, + 0x66, 0xe1, 0x42, 0x17, 0x06, 0xea, 0xe6, 0x07, 0xdc, 0x01, 0x66, 0x35, 0xb5, 0x49, 0x64, 0x01, + 0x13, 0x65, 0x6b, 0xb4, 0x7c, 0xb9, 0xd5, 0x2c, 0x98, 0x2b, 0x3d, 0x64, 0x50, 0x4f, 0xed, 0xe2, + 0xb7, 0xb3, 0xe9, 0xf0, 0x6a, 0xe9, 0xf6, 0x26, 0xc8, 0xb3, 0x6d, 0x6c, 0x5b, 0xa1, 0x25, 0x37, + 0xe2, 0xe3, 0x77, 0xb7, 0xe9, 0x45, 0xcd, 0xd8, 0xc0, 0xa1, 0x55, 0x86, 0x72, 0x41, 0x40, 0x42, + 0x43, 0xca, 0x2a, 0xfc, 0x1a, 0x18, 0xa2, 0x01, 0xae, 0xca, 0x40, 0xbf, 0x72, 0xda, 0xcd, 0xd6, + 0xe3, 0x41, 0x2a, 0x01, 0xae, 0x26, 0x7b, 0x81, 0xfd, 0x43, 0x1c, 0x16, 0xbe, 0x63, 0x80, 0x61, + 0xca, 0x0b, 0x94, 0x2c, 0x6a, 0xaf, 0x0d, 0xca, 0x83, 0x54, 0x15, 0x14, 0xff, 0x91, 0x04, 0x2f, + 0xfe, 0x2b, 0x03, 0xe6, 0x7b, 0xa9, 0xae, 0xf8, 0x9e, 0x2d, 0x96, 0x63, 0x5d, 0xee, 0x6d, 0x91, + 0xe9, 0x4f, 0xe9, 0x7b, 0xfb, 0xa4, 0x59, 0x78, 0xe8, 0x8e, 0x06, 0xb4, 0x22, 0xf0, 0x39, 0xf5, + 0xdc, 0xa2, 0x50, 0xcc, 0xb7, 0x3b, 0x76, 0xd2, 0x2c, 0x9c, 0x57, 0x6a, 0xed, 0xbe, 0xc2, 0x06, + 0x80, 0xae, 0x45, 0xc3, 0x1b, 0xc4, 0xf2, 0xa8, 0x30, 0xeb, 0xd4, 0xb1, 0x0c, 0xdf, 0x23, 0x77, + 0x97, 0x1e, 0x4c, 0xa3, 0x3c, 0x23, 0x21, 0xe1, 0xf5, 0x0e, 0x6b, 0xa8, 0x0b, 0x02, 0xab, 0x5b, + 0x04, 0x5b, 0x54, 0x95, 0x22, 0xed, 0x44, 0x61, 0x54, 0x24, 0xb9, 0xf0, 0x61, 0x30, 0x52, 0xc7, + 0x94, 0x5a, 0x35, 0xcc, 0xeb, 0xcf, 0x68, 0x72, 0x44, 0x6f, 0x08, 0x32, 0x8a, 0xf9, 0xac, 0x3f, + 0xb9, 0xdc, 0x2b, 0x6a, 0xd7, 0x1d, 0x1a, 0xc2, 0x57, 0x3b, 0x36, 0x40, 0xe9, 0xee, 0x9e, 0x90, + 0x69, 0xf3, 0xf4, 0x57, 0xc5, 0x2f, 0xa6, 0x68, 0xc9, 0xff, 0x55, 0x90, 0x73, 0x42, 0x5c, 0x8f, + 0xcf, 0xee, 0x97, 0x07, 0x94, 0x7b, 0xe5, 0xb3, 0xd2, 0x87, 0xdc, 0x3a, 0x43, 0x43, 0x02, 0xb4, + 0xf8, 0xab, 0x0c, 0x78, 0xa0, 0x97, 0x0a, 0x3b, 0x50, 0x28, 0x8b, 0x78, 0xe0, 0x46, 0xc4, 0x72, + 0x65, 0xc6, 0xa9, 0x88, 0x6f, 0x73, 0x2a, 0x92, 0x5c, 0x56, 0xf2, 0xa9, 0xe3, 0xd5, 0x22, 0xd7, + 0x22, 0x32, 0x9d, 0xd4, 0x53, 0x57, 0x24, 0x1d, 0x29, 0x09, 0x58, 0x02, 0x80, 0xee, 0xfb, 0x24, + 0xe4, 0x18, 0xb2, 0x7a, 0x9d, 0x63, 0x05, 0xa2, 0xa2, 0xa8, 0x48, 0x93, 0x60, 0x27, 0xda, 0x81, + 0xe3, 0xd9, 0x72, 0xd5, 0xd5, 0x2e, 0x7e, 0xd1, 0xf1, 0x6c, 0xc4, 0x39, 0x0c, 0xdf, 0x75, 0x68, + 0xc8, 0x28, 0x72, 0xc9, 0xdb, 0xa2, 0xce, 0x25, 0x95, 0x04, 0xc3, 0xaf, 0xb2, 0xaa, 0xef, 0x13, + 0x07, 0x53, 0x73, 0x38, 0xc1, 0x5f, 0x51, 0x54, 0xa4, 0x49, 0x14, 0xff, 0x99, 0xef, 0x9d, 0x24, + 0xac, 0x94, 0xc0, 0x07, 0x41, 0xae, 0x46, 0xfc, 0x28, 0x90, 0x51, 0x52, 0xd1, 0x7e, 0x9e, 0x11, + 0x91, 0xe0, 0xb1, 0xac, 0x6c, 0xb4, 0xb5, 0xa9, 0x2a, 0x2b, 0xe3, 0xe6, 0x34, 0xe6, 0xc3, 0x6f, + 0x1a, 0x20, 0xe7, 0xc9, 0xe0, 0xb0, 0x94, 0x7b, 0x75, 0x40, 0x79, 0xc1, 0xc3, 0x9b, 0xb8, 0x2b, + 0x22, 0x2f, 0x90, 0xe1, 0x93, 0x20, 0x47, 0xab, 0x7e, 0x80, 0x65, 0xd4, 0x67, 0x63, 0xa1, 0x0a, + 0x23, 0x9e, 0x34, 0x0b, 0x67, 0x63, 0x73, 0x9c, 0x80, 0x84, 0x30, 0xfc, 0x8e, 0x01, 0x40, 0xc3, + 0x72, 0x1d, 0xdb, 0xe2, 0x2d, 0x43, 0x8e, 0xbb, 0xdf, 0xdf, 0xb4, 0x7e, 0x49, 0x99, 0x17, 0x8b, + 0x96, 0xfc, 0x47, 0x1a, 0x34, 0x7c, 0xd7, 0x00, 0xe3, 0x34, 0xda, 0x25, 0x52, 0x8b, 0xf2, 
0xe6, + 0x62, 0x6c, 0xe9, 0xcb, 0x7d, 0xf5, 0xa5, 0xa2, 0x01, 0x94, 0x27, 0x5a, 0xcd, 0xc2, 0xb8, 0x4e, + 0x41, 0x6d, 0x0e, 0xc0, 0xef, 0x1b, 0x20, 0xdf, 0x88, 0xcf, 0xec, 0x11, 0xbe, 0xe1, 0x5f, 0x1f, + 0xd0, 0xc2, 0xca, 0x8c, 0x4a, 0x76, 0x81, 0xea, 0x03, 0x94, 0x07, 0xf0, 0x8f, 0x06, 0x30, 0x2d, + 0x5b, 0x14, 0x78, 0xcb, 0xdd, 0x26, 0x8e, 0x17, 0x62, 0x22, 0xfa, 0x4d, 0x6a, 0xe6, 0xb9, 0x7b, + 0xfd, 0x3d, 0x0b, 0xd3, 0xbd, 0x6c, 0x79, 0x4e, 0x7a, 0x67, 0x2e, 0xf7, 0x70, 0x03, 0xf5, 0x74, + 0x90, 0x27, 0x5a, 0xd2, 0xd2, 0x98, 0xa3, 0x03, 0x48, 0xb4, 0xa4, 0x97, 0x92, 0xd5, 0x21, 0xe9, + 0xa0, 0x34, 0x68, 0xb8, 0x05, 0xa6, 0x02, 0x82, 0x39, 0xc0, 0x4d, 0xef, 0xc0, 0xf3, 0x0f, 0xbd, + 0x6b, 0x0e, 0x76, 0x6d, 0x6a, 0x82, 0x39, 0x63, 0x21, 0x5f, 0xbe, 0xd4, 0x6a, 0x16, 0xa6, 0xb6, + 0xbb, 0x09, 0xa0, 0xee, 0x7a, 0xc5, 0x77, 0xb3, 0xe9, 0x5b, 0x40, 0xba, 0x8b, 0x80, 0xef, 0x8b, + 0xa7, 0x17, 0xb1, 0xa1, 0xa6, 0xc1, 0x57, 0xeb, 0xcd, 0x01, 0x25, 0x93, 0x6a, 0x03, 0x92, 0x4e, + 0x4e, 0x91, 0x28, 0xd2, 0xfc, 0x80, 0x3f, 0x31, 0xc0, 0x59, 0xab, 0x5a, 0xc5, 0x41, 0x88, 0x6d, + 0x51, 0xdc, 0x33, 0x9f, 0x41, 0xfd, 0x9a, 0x92, 0x5e, 0x9d, 0x5d, 0xd6, 0xa1, 0x51, 0xbb, 0x27, + 0xf0, 0x59, 0x70, 0x8e, 0x86, 0x3e, 0xc1, 0x76, 0xaa, 0x6d, 0x86, 0xad, 0x66, 0xe1, 0x5c, 0xa5, + 0x8d, 0x83, 0x52, 0x92, 0xc5, 0xbf, 0xe5, 0x40, 0xe1, 0x0e, 0x5b, 0xed, 0x2e, 0x2e, 0x66, 0x57, + 0xc0, 0x30, 0x7f, 0x5c, 0x9b, 0x47, 0x25, 0xaf, 0xb5, 0x82, 0x9c, 0x8a, 0x24, 0x97, 0x1d, 0x14, + 0x0c, 0x9f, 0xb5, 0x2f, 0x59, 0x2e, 0xa8, 0x0e, 0x8a, 0x8a, 0x20, 0xa3, 0x98, 0x0f, 0x97, 0x00, + 0xb0, 0x71, 0x40, 0x30, 0x3b, 0xac, 0x6c, 0x73, 0x84, 0x4b, 0xab, 0x45, 0x5a, 0x55, 0x1c, 0xa4, + 0x49, 0xc1, 0x6b, 0x00, 0xc6, 0xff, 0x1c, 0xdf, 0x7b, 0xd9, 0x22, 0x9e, 0xe3, 0xd5, 0xcc, 0x3c, + 0x77, 0x7b, 0x9a, 0x75, 0x63, 0xab, 0x1d, 0x5c, 0xd4, 0x45, 0x03, 0xbe, 0x0d, 0x86, 0xc5, 0xd0, + 0x87, 0x9f, 0x10, 0x03, 0xac, 0xf2, 0x80, 0xc7, 0x88, 0x43, 0x21, 0x09, 0xd9, 0x59, 0xdd, 0x73, + 0xf7, 0xba, 0xba, 0xdf, 0xb6, 0x9c, 0x0e, 0xff, 0x97, 0x97, 0xd3, 0xe2, 0xbf, 0x8d, 0x74, 0xcd, + 0xd1, 0x1e, 0xb5, 0x52, 0xb5, 0x5c, 0x0c, 0x57, 0xc1, 0x04, 0xbb, 0x31, 0x21, 0x1c, 0xb8, 0x4e, + 0xd5, 0xa2, 0xfc, 0xc2, 0x2e, 0x92, 0x5d, 0xcd, 0x90, 0x2a, 0x29, 0x3e, 0xea, 0xd0, 0x80, 0x2f, + 0x00, 0x28, 0x6e, 0x11, 0x6d, 0x76, 0x44, 0x43, 0xa4, 0xee, 0x03, 0x95, 0x0e, 0x09, 0xd4, 0x45, + 0x0b, 0xae, 0x80, 0x49, 0xd7, 0xda, 0xc5, 0x6e, 0x05, 0xbb, 0xb8, 0x1a, 0xfa, 0x84, 0x9b, 0x12, + 0x23, 0x8d, 0xa9, 0x56, 0xb3, 0x30, 0x79, 0x3d, 0xcd, 0x44, 0x9d, 0xf2, 0xc5, 0xf9, 0xf4, 0xd6, + 0xd6, 0x1f, 0x5c, 0xdc, 0xcd, 0x3e, 0xc8, 0x80, 0x99, 0xde, 0x99, 0x01, 0xbf, 0x95, 0x5c, 0x21, + 0xc5, 0x0d, 0xe1, 0xf5, 0x41, 0x65, 0xa1, 0xbc, 0x43, 0x82, 0xce, 0xfb, 0x23, 0xfc, 0x3a, 0x6b, + 0xd7, 0x2c, 0x37, 0x1e, 0x5a, 0xbd, 0x36, 0x30, 0x17, 0x18, 0x48, 0x79, 0x54, 0x74, 0x82, 0x96, + 0xcb, 0x1b, 0x3f, 0xcb, 0xc5, 0xc5, 0xdf, 0x18, 0xe9, 0x29, 0x42, 0xb2, 0x83, 0xe1, 0x0f, 0x0c, + 0x70, 0xde, 0x0f, 0xb0, 0xb7, 0xbc, 0xbd, 0xfe, 0xd2, 0xff, 0x89, 0x9d, 0x2c, 0x43, 0xb5, 0x79, + 0x4a, 0x3f, 0x5f, 0xa8, 0x6c, 0x6d, 0x0a, 0x83, 0xdb, 0xc4, 0x0f, 0x68, 0xf9, 0x42, 0xab, 0x59, + 0x38, 0xbf, 0xd5, 0x0e, 0x85, 0xd2, 0xd8, 0xc5, 0x3a, 0x98, 0x5a, 0x3b, 0x0a, 0x31, 0xf1, 0x2c, + 0x77, 0xd5, 0xaf, 0x46, 0x75, 0xec, 0x85, 0xc2, 0xd1, 0xd4, 0xc4, 0xcb, 0xb8, 0xcb, 0x89, 0xd7, + 0x03, 0x20, 0x1b, 0x11, 0x57, 0x66, 0xf1, 0x98, 0x9a, 0xe8, 0xa2, 0xeb, 0x88, 0xd1, 0x8b, 0xf3, + 0x60, 0x88, 0xf9, 0x09, 0x2f, 0x81, 0x2c, 0xb1, 0x0e, 0xb9, 0xd5, 0xf1, 0xf2, 0x08, 0x13, 0x41, + 0xd6, 0x21, 0x62, 
0xb4, 0xe2, 0x5f, 0xe6, 0xc1, 0xf9, 0xd4, 0xb3, 0xc0, 0x19, 0x90, 0x51, 0x63, + 0x62, 0x20, 0x8d, 0x66, 0xd6, 0x57, 0x51, 0xc6, 0xb1, 0xe1, 0xd3, 0xaa, 0xf8, 0x0a, 0xd0, 0x82, + 0x3a, 0x4b, 0x38, 0x95, 0xf5, 0xe7, 0x89, 0x39, 0xe6, 0x48, 0x5c, 0x38, 0x99, 0x0f, 0x78, 0x4f, + 0xee, 0x12, 0xe1, 0x03, 0xde, 0x43, 0x8c, 0xf6, 0x69, 0xc7, 0x7d, 0xf1, 0xbc, 0x31, 0x77, 0x17, + 0xf3, 0xc6, 0xe1, 0xdb, 0xce, 0x1b, 0x1f, 0x04, 0xb9, 0xd0, 0x09, 0x5d, 0xcc, 0x0f, 0x32, 0xed, + 0x1a, 0x75, 0x83, 0x11, 0x91, 0xe0, 0xc1, 0xb7, 0xc0, 0x88, 0x8d, 0xf7, 0xac, 0xc8, 0x0d, 0xf9, + 0x99, 0x35, 0xb6, 0xb4, 0xd2, 0x87, 0x14, 0x12, 0xc3, 0xe0, 0x55, 0x61, 0x17, 0xc5, 0x00, 0xf0, + 0x21, 0x30, 0x52, 0xb7, 0x8e, 0x9c, 0x7a, 0x54, 0xe7, 0x0d, 0xa6, 0x21, 0xc4, 0x36, 0x04, 0x09, + 0xc5, 0x3c, 0x56, 0x19, 0xf1, 0x51, 0xd5, 0x8d, 0xa8, 0xd3, 0xc0, 0x92, 0x29, 0x9b, 0x3f, 0x55, + 0x19, 0xd7, 0x52, 0x7c, 0xd4, 0xa1, 0xc1, 0xc1, 0x1c, 0x8f, 0x2b, 0x8f, 0x69, 0x60, 0x82, 0x84, + 0x62, 0x5e, 0x3b, 0x98, 0x94, 0x1f, 0xef, 0x05, 0x26, 0x95, 0x3b, 0x34, 0xe0, 0xa3, 0x60, 0xb4, + 0x6e, 0x1d, 0x5d, 0xc7, 0x5e, 0x2d, 0xdc, 0x37, 0xcf, 0xce, 0x19, 0x0b, 0xd9, 0xf2, 0xd9, 0x56, + 0xb3, 0x30, 0xba, 0x11, 0x13, 0x51, 0xc2, 0xe7, 0xc2, 0x8e, 0x27, 0x85, 0xcf, 0x69, 0xc2, 0x31, + 0x11, 0x25, 0x7c, 0xd6, 0xbd, 0x04, 0x56, 0xc8, 0x36, 0x97, 0x79, 0xbe, 0xfd, 0x9a, 0xbb, 0x2d, + 0xc8, 0x28, 0xe6, 0xc3, 0x05, 0x90, 0xaf, 0x5b, 0x47, 0x7c, 0x24, 0x61, 0x4e, 0x70, 0xb3, 0x7c, + 0x30, 0xbe, 0x21, 0x69, 0x48, 0x71, 0xb9, 0xa4, 0xe3, 0x09, 0xc9, 0x49, 0x4d, 0x52, 0xd2, 0x90, + 0xe2, 0xb2, 0x24, 0x8e, 0x3c, 0xe7, 0x56, 0x84, 0x85, 0x30, 0xe4, 0x91, 0x51, 0x49, 0x7c, 0x33, + 0x61, 0x21, 0x5d, 0x0e, 0x96, 0x00, 0xa8, 0x47, 0x6e, 0xe8, 0x04, 0x2e, 0xde, 0xda, 0x33, 0x2f, + 0xf0, 0xf8, 0xf3, 0xa6, 0x7f, 0x43, 0x51, 0x91, 0x26, 0x01, 0x31, 0x18, 0xc2, 0x5e, 0x54, 0x37, + 0x2f, 0xf2, 0x83, 0xbd, 0x2f, 0x29, 0xa8, 0x76, 0xce, 0x9a, 0x17, 0xd5, 0x11, 0x37, 0x0f, 0x9f, + 0x06, 0x67, 0xeb, 0xd6, 0x11, 0x2b, 0x07, 0x98, 0x84, 0x0e, 0xa6, 0xe6, 0x14, 0x7f, 0xf8, 0x49, + 0xd6, 0xed, 0x6e, 0xe8, 0x0c, 0xd4, 0x2e, 0xc7, 0x15, 0x1d, 0x4f, 0x53, 0x9c, 0xd6, 0x14, 0x75, + 0x06, 0x6a, 0x97, 0x63, 0x91, 0x26, 0xf8, 0x56, 0xe4, 0x10, 0x6c, 0x9b, 0xf7, 0xf1, 0x06, 0x59, + 0xbe, 0xac, 0x10, 0x34, 0xa4, 0xb8, 0xb0, 0x11, 0xcf, 0xae, 0x4c, 0xbe, 0x0d, 0x6f, 0xf6, 0xb7, + 0x92, 0x6f, 0x91, 0x65, 0x42, 0xac, 0x63, 0x71, 0xd2, 0xe8, 0x53, 0x2b, 0x48, 0x41, 0xce, 0x72, + 0xdd, 0xad, 0x3d, 0xf3, 0x12, 0x8f, 0x7d, 0xbf, 0x4f, 0x10, 0x55, 0x75, 0x96, 0x19, 0x08, 0x12, + 0x58, 0x0c, 0xd4, 0xf7, 0x58, 0x6a, 0xcc, 0x0c, 0x16, 0x74, 0x8b, 0x81, 0x20, 0x81, 0xc5, 0x9f, + 0xd4, 0x3b, 0xde, 0xda, 0x33, 0xef, 0x1f, 0xf0, 0x93, 0x32, 0x10, 0x24, 0xb0, 0xa0, 0x03, 0xb2, + 0x9e, 0x1f, 0x9a, 0x97, 0x07, 0x72, 0x3c, 0xf3, 0x03, 0x67, 0xd3, 0x0f, 0x11, 0xc3, 0x80, 0x3f, + 0x36, 0x00, 0x08, 0x92, 0x14, 0x7d, 0xa0, 0x2f, 0x23, 0x91, 0x14, 0x64, 0x29, 0xc9, 0xed, 0x35, + 0x2f, 0x24, 0xc7, 0xc9, 0xf5, 0x48, 0xdb, 0x03, 0x9a, 0x17, 0xf0, 0x17, 0x06, 0xb8, 0xa8, 0xb7, + 0xc9, 0xca, 0xbd, 0x59, 0x1e, 0x91, 0x1b, 0xfd, 0x4e, 0xf3, 0xb2, 0xef, 0xbb, 0x65, 0xb3, 0xd5, + 0x2c, 0x5c, 0x5c, 0xee, 0x82, 0x8a, 0xba, 0xfa, 0x02, 0x7f, 0x6b, 0x80, 0x49, 0x59, 0x45, 0x35, + 0x0f, 0x0b, 0x3c, 0x80, 0xb8, 0xdf, 0x01, 0x4c, 0xe3, 0x88, 0x38, 0xaa, 0x97, 0xec, 0x1d, 0x7c, + 0xd4, 0xe9, 0x1a, 0xfc, 0x83, 0x01, 0xc6, 0x6d, 0x1c, 0x60, 0xcf, 0xc6, 0x5e, 0x95, 0xf9, 0x3a, + 0xd7, 0x97, 0x91, 0x45, 0xda, 0xd7, 0x55, 0x0d, 0x42, 0xb8, 0x59, 0x92, 0x6e, 0x8e, 0xeb, 0xac, + 0x93, 0x66, 0x61, 0x3a, 0x51, 0xd5, 0x39, 
0xa8, 0xcd, 0x4b, 0xf8, 0x9e, 0x01, 0xce, 0x27, 0x0b, + 0x20, 0x8e, 0x94, 0xf9, 0x01, 0xe6, 0x01, 0x6f, 0x5f, 0x97, 0xdb, 0x01, 0x51, 0xda, 0x03, 0xf8, + 0x3b, 0x83, 0x75, 0x6a, 0xf1, 0xbd, 0x8f, 0x9a, 0x45, 0x1e, 0xcb, 0x37, 0xfa, 0x1e, 0x4b, 0x85, + 0x20, 0x42, 0x79, 0x35, 0x69, 0x05, 0x15, 0xe7, 0xa4, 0x59, 0x98, 0xd2, 0x23, 0xa9, 0x18, 0x48, + 0xf7, 0x10, 0x7e, 0xcf, 0x00, 0xe3, 0x38, 0xe9, 0xb8, 0xa9, 0xf9, 0x60, 0x5f, 0x82, 0xd8, 0xb5, + 0x89, 0x17, 0x37, 0x75, 0x8d, 0x45, 0x51, 0x1b, 0x36, 0xeb, 0x20, 0xf1, 0x91, 0x55, 0x0f, 0x5c, + 0x6c, 0xfe, 0x4f, 0x9f, 0x3b, 0xc8, 0x35, 0x61, 0x17, 0xc5, 0x00, 0xf0, 0x2a, 0xc8, 0x7b, 0x91, + 0xeb, 0x5a, 0xbb, 0x2e, 0x36, 0x1f, 0xe2, 0xbd, 0x88, 0x1a, 0xc9, 0x6e, 0x4a, 0x3a, 0x52, 0x12, + 0x70, 0x0f, 0xcc, 0x1d, 0xbd, 0xa8, 0x3e, 0x4f, 0xea, 0x3a, 0x34, 0x34, 0xaf, 0x70, 0x2b, 0x33, + 0xad, 0x66, 0x61, 0x7a, 0xa7, 0xfb, 0x58, 0xf1, 0x8e, 0x36, 0xe0, 0x2b, 0xe0, 0x7e, 0x4d, 0x66, + 0xad, 0xbe, 0x8b, 0x6d, 0x1b, 0xdb, 0xf1, 0xc5, 0xcd, 0xfc, 0x5f, 0x31, 0xb8, 0x8c, 0x37, 0xf8, + 0x4e, 0x5a, 0x00, 0xdd, 0x4e, 0x1b, 0x5e, 0x07, 0xd3, 0x1a, 0x7b, 0xdd, 0x0b, 0xb7, 0x48, 0x25, + 0x24, 0x8e, 0x57, 0x33, 0x17, 0xb8, 0xdd, 0x8b, 0xf1, 0x8e, 0xdc, 0xd1, 0x78, 0xa8, 0x87, 0x0e, + 0xfc, 0x62, 0x9b, 0x35, 0xfe, 0x0a, 0xcd, 0x0a, 0x5e, 0xc4, 0xc7, 0xd4, 0x7c, 0x98, 0x77, 0x27, + 0x7c, 0xb1, 0x77, 0x34, 0x3a, 0xea, 0x21, 0x0f, 0xbf, 0x00, 0x2e, 0xa4, 0x38, 0xec, 0x8a, 0x62, + 0x3e, 0x22, 0xee, 0x1a, 0xac, 0x9f, 0xdd, 0x89, 0x89, 0xa8, 0x9b, 0x24, 0xfc, 0x3c, 0x80, 0x1a, + 0x79, 0xc3, 0x0a, 0xb8, 0xfe, 0xa3, 0xe2, 0xda, 0xc3, 0x56, 0x74, 0x47, 0xd2, 0x50, 0x17, 0x39, + 0xf8, 0x53, 0xa3, 0xed, 0x49, 0x92, 0xdb, 0x31, 0x35, 0xaf, 0xf2, 0xfd, 0xbb, 0x71, 0xca, 0x2c, + 0xd4, 0xde, 0x83, 0x44, 0x2e, 0xd6, 0xc2, 0xac, 0x41, 0xa1, 0x1e, 0x2e, 0xcc, 0xb0, 0x1b, 0x7a, + 0xaa, 0xc2, 0xc3, 0x09, 0x90, 0x3d, 0xc0, 0xf2, 0xab, 0x0a, 0xc4, 0x7e, 0x42, 0x1b, 0xe4, 0x1a, + 0x96, 0x1b, 0xc5, 0x43, 0x86, 0x3e, 0x77, 0x07, 0x48, 0x18, 0x7f, 0x36, 0xf3, 0x8c, 0x31, 0xf3, + 0xbe, 0x01, 0xa6, 0xbb, 0x1f, 0x3c, 0xf7, 0xd4, 0xad, 0x9f, 0x19, 0x60, 0xb2, 0xe3, 0x8c, 0xe9, + 0xe2, 0xd1, 0xad, 0x76, 0x8f, 0x5e, 0xe9, 0xf7, 0x61, 0x21, 0x36, 0x07, 0xef, 0x90, 0x75, 0xf7, + 0x7e, 0x68, 0x80, 0x89, 0x74, 0xd9, 0xbe, 0x97, 0xf1, 0x2a, 0xbe, 0x9f, 0x01, 0xd3, 0xdd, 0x1b, + 0x7b, 0x48, 0xd4, 0x04, 0x63, 0x30, 0x93, 0xa0, 0x6e, 0x53, 0xe3, 0x77, 0x0c, 0x30, 0xf6, 0x96, + 0x92, 0x8b, 0xdf, 0xba, 0xf7, 0x7d, 0x06, 0x15, 0x9f, 0x93, 0x09, 0x83, 0x22, 0x1d, 0xb7, 0xf8, + 0x7b, 0x03, 0x4c, 0x75, 0x6d, 0x00, 0xe0, 0x15, 0x30, 0x6c, 0xb9, 0xae, 0x7f, 0x28, 0x46, 0x89, + 0xda, 0x3b, 0x82, 0x65, 0x4e, 0x45, 0x92, 0xab, 0x45, 0x2f, 0xf3, 0x59, 0x45, 0xaf, 0xf8, 0x27, + 0x03, 0x5c, 0xbe, 0x5d, 0x26, 0xde, 0x93, 0x25, 0x5d, 0x00, 0x79, 0xd9, 0xbc, 0x1f, 0xf3, 0xe5, + 0x94, 0xa5, 0x58, 0x16, 0x0d, 0xfe, 0xa1, 0x99, 0xf8, 0x55, 0xfc, 0xc0, 0x00, 0x13, 0x15, 0x4c, + 0x1a, 0x4e, 0x15, 0x23, 0xbc, 0x87, 0x09, 0xf6, 0xaa, 0x18, 0x2e, 0x82, 0x51, 0xfe, 0xba, 0x3b, + 0xb0, 0xaa, 0xf1, 0xab, 0x9b, 0x49, 0x19, 0xf2, 0xd1, 0xcd, 0x98, 0x81, 0x12, 0x19, 0xf5, 0x9a, + 0x27, 0xd3, 0xf3, 0x35, 0xcf, 0x65, 0x30, 0x14, 0x24, 0x83, 0xe8, 0x3c, 0xe3, 0xf2, 0xd9, 0x33, + 0xa7, 0x72, 0xae, 0x4f, 0x42, 0x3e, 0x5d, 0xcb, 0x49, 0xae, 0x4f, 0x42, 0xc4, 0xa9, 0xc5, 0x5f, + 0x1b, 0xe0, 0x5c, 0x7b, 0x1d, 0x67, 0x80, 0x24, 0x72, 0x3b, 0xde, 0x2b, 0x31, 0x1e, 0xe2, 0x1c, + 0xfd, 0x73, 0x97, 0xcc, 0xed, 0x3f, 0x77, 0x81, 0xcf, 0x83, 0x49, 0xf9, 0x73, 0xed, 0x28, 0x20, + 0x98, 0xf2, 0x77, 0xa7, 0xd9, 0xf6, 0x8f, 0x66, 0x37, 0xd2, 0x02, 
0xa8, 0x53, 0xa7, 0xf8, 0x67, + 0x03, 0x74, 0xfb, 0x76, 0x0d, 0x5e, 0x12, 0x93, 0x50, 0x6d, 0xbc, 0x18, 0x4f, 0x41, 0x61, 0x03, + 0x8c, 0x50, 0x11, 0x7e, 0x99, 0x1e, 0x5b, 0xa7, 0x4c, 0x8f, 0xf4, 0x62, 0x8a, 0x16, 0x2c, 0xa6, + 0xc6, 0x60, 0x2c, 0x43, 0xaa, 0x56, 0x39, 0xf2, 0x6c, 0x39, 0x1c, 0x1f, 0x17, 0x19, 0xb2, 0xb2, + 0x2c, 0x68, 0x48, 0x71, 0xcb, 0xd5, 0x0f, 0x3f, 0x99, 0x3d, 0xf3, 0xd1, 0x27, 0xb3, 0x67, 0x3e, + 0xfe, 0x64, 0xf6, 0xcc, 0x37, 0x5a, 0xb3, 0xc6, 0x87, 0xad, 0x59, 0xe3, 0xa3, 0xd6, 0xac, 0xf1, + 0x71, 0x6b, 0xd6, 0xf8, 0x7b, 0x6b, 0xd6, 0xf8, 0xd1, 0x3f, 0x66, 0xcf, 0x7c, 0xe5, 0xb9, 0x53, + 0x7d, 0x2e, 0xfe, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x11, 0x77, 0x12, 0x7e, 0x87, 0x2e, 0x00, + 0x00, } func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { @@ -2656,6 +2658,11 @@ func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x1a i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) @@ -3345,6 +3352,8 @@ func (m *ValidationRule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -3811,6 +3820,7 @@ func (this *ValidationRule) String() string { s := strings.Join([]string{`&ValidationRule{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, `}`, }, "") return s @@ -9037,6 +9047,38 @@ func (m *ValidationRule) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 3f8ba6c7..feb9fd9d 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -717,6 +717,19 @@ message ValidationRule { // If unset, the message is "failed rule: {Rule}". // e.g. "must be a URL with the host matching spec.host" optional string message = 2; + + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. 
+ // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + optional string messageExpression = 3; } // WebhookClientConfig contains the information to make a TLS connection with the webhook. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index c9d943c9..ab0169f9 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -235,12 +235,24 @@ type ValidationRule struct { // If unset, the message is "failed rule: {Rule}". // e.g. "must be a URL with the host matching spec.host" Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,3,opt,name=messageExpression"` } // JSON represents any valid JSON value. // These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. 
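For orientation, here is a minimal sketch (not part of the patch) of how the new field sits next to Rule and Message when a v1beta1 ValidationRule is built in Go. The CEL expressions and values are illustrative assumptions; only the field names come from the type definition above.

package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	// Hypothetical rule and messages; messageExpression must evaluate to a string.
	// Per the field documentation, a runtime error or an empty/whitespace/multi-line
	// result makes the server fall back to behaving as if the field were unset.
	rule := apiextensionsv1beta1.ValidationRule{
		Rule:              "self.replicas >= self.minReplicas",
		Message:           "replicas must be at least minReplicas",
		MessageExpression: `"replicas must be at least " + string(self.minReplicas)`,
	}
	fmt.Printf("%+v\n", rule)
}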
type JSON struct { - Raw []byte `protobuf:"bytes,1,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` } // OpenAPISchemaType is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index 54cae3cf..ef5bc5e3 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -1306,6 +1306,7 @@ func Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apie func autoConvert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { out.Rule = in.Rule out.Message = in.Message + out.MessageExpression = in.MessageExpression return nil } @@ -1317,6 +1318,7 @@ func Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *Validati func autoConvert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { out.Rule = in.Rule out.Message = in.Message + out.MessageExpression = in.MessageExpression return nil } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go new file mode 100644 index 00000000..b83de1c4 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go @@ -0,0 +1,84 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CustomResourceColumnDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceColumnDefinition type for use +// with apply. +type CustomResourceColumnDefinitionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Format *string `json:"format,omitempty"` + Description *string `json:"description,omitempty"` + Priority *int32 `json:"priority,omitempty"` + JSONPath *string `json:"jsonPath,omitempty"` +} + +// CustomResourceColumnDefinitionApplyConfiguration constructs an declarative configuration of the CustomResourceColumnDefinition type for use with +// apply. +func CustomResourceColumnDefinition() *CustomResourceColumnDefinitionApplyConfiguration { + return &CustomResourceColumnDefinitionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Name field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithName(value string) *CustomResourceColumnDefinitionApplyConfiguration { + b.Name = &value + return b +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithType(value string) *CustomResourceColumnDefinitionApplyConfiguration { + b.Type = &value + return b +} + +// WithFormat sets the Format field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Format field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithFormat(value string) *CustomResourceColumnDefinitionApplyConfiguration { + b.Format = &value + return b +} + +// WithDescription sets the Description field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Description field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithDescription(value string) *CustomResourceColumnDefinitionApplyConfiguration { + b.Description = &value + return b +} + +// WithPriority sets the Priority field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Priority field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithPriority(value int32) *CustomResourceColumnDefinitionApplyConfiguration { + b.Priority = &value + return b +} + +// WithJSONPath sets the JSONPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JSONPath field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithJSONPath(value string) *CustomResourceColumnDefinitionApplyConfiguration { + b.JSONPath = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go new file mode 100644 index 00000000..8705d1a2 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +// CustomResourceConversionApplyConfiguration represents an declarative configuration of the CustomResourceConversion type for use +// with apply. +type CustomResourceConversionApplyConfiguration struct { + Strategy *v1.ConversionStrategyType `json:"strategy,omitempty"` + Webhook *WebhookConversionApplyConfiguration `json:"webhook,omitempty"` +} + +// CustomResourceConversionApplyConfiguration constructs an declarative configuration of the CustomResourceConversion type for use with +// apply. +func CustomResourceConversion() *CustomResourceConversionApplyConfiguration { + return &CustomResourceConversionApplyConfiguration{} +} + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *CustomResourceConversionApplyConfiguration) WithStrategy(value v1.ConversionStrategyType) *CustomResourceConversionApplyConfiguration { + b.Strategy = &value + return b +} + +// WithWebhook sets the Webhook field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Webhook field is set to the value of the last call. +func (b *CustomResourceConversionApplyConfiguration) WithWebhook(value *WebhookConversionApplyConfiguration) *CustomResourceConversionApplyConfiguration { + b.Webhook = value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go new file mode 100644 index 00000000..f6b6edb7 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go @@ -0,0 +1,218 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CustomResourceDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceDefinition type for use +// with apply. 
+type CustomResourceDefinitionApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CustomResourceDefinitionSpecApplyConfiguration `json:"spec,omitempty"` + Status *CustomResourceDefinitionStatusApplyConfiguration `json:"status,omitempty"` +} + +// CustomResourceDefinition constructs an declarative configuration of the CustomResourceDefinition type for use with +// apply. +func CustomResourceDefinition(name string) *CustomResourceDefinitionApplyConfiguration { + b := &CustomResourceDefinitionApplyConfiguration{} + b.WithName(name) + b.WithKind("CustomResourceDefinition") + b.WithAPIVersion("apiextensions.k8s.io/v1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithKind(value string) *CustomResourceDefinitionApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithAPIVersion(value string) *CustomResourceDefinitionApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithName(value string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithGenerateName(value string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithNamespace(value string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *CustomResourceDefinitionApplyConfiguration) WithUID(value types.UID) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithResourceVersion(value string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithGeneration(value int64) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *CustomResourceDefinitionApplyConfiguration) WithLabels(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *CustomResourceDefinitionApplyConfiguration) WithAnnotations(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *CustomResourceDefinitionApplyConfiguration) WithFinalizers(values ...string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *CustomResourceDefinitionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithSpec(value *CustomResourceDefinitionSpecApplyConfiguration) *CustomResourceDefinitionApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
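As a hedged aside on the map-valued builders documented above: repeated WithLabels (or WithAnnotations) calls merge their entries, with a later call overwriting an earlier entry that shares a key. The names and labels below are invented for illustration; only the builder calls come from the generated code in this patch.

package main

import (
	"fmt"

	applyv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1"
)

func main() {
	// Hypothetical CRD name and labels, used only to show the merge semantics.
	crd := applyv1.CustomResourceDefinition("widgets.example.com").
		WithLabels(map[string]string{"team": "platform", "tier": "alpha"}).
		WithLabels(map[string]string{"tier": "beta"})
	fmt.Println(crd.Labels["tier"]) // prints "beta": the later entry wins
}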
+func (b *CustomResourceDefinitionApplyConfiguration) WithStatus(value *CustomResourceDefinitionStatusApplyConfiguration) *CustomResourceDefinitionApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go new file mode 100644 index 00000000..2cf9dd4e --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CustomResourceDefinitionConditionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionCondition type for use +// with apply. +type CustomResourceDefinitionConditionApplyConfiguration struct { + Type *v1.CustomResourceDefinitionConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// CustomResourceDefinitionConditionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionCondition type for use with +// apply. +func CustomResourceDefinitionCondition() *CustomResourceDefinitionConditionApplyConfiguration { + return &CustomResourceDefinitionConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value v1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *CustomResourceDefinitionConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *CustomResourceDefinitionConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithReason(value string) *CustomResourceDefinitionConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithMessage(value string) *CustomResourceDefinitionConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go new file mode 100644 index 00000000..06b7a404 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CustomResourceDefinitionNamesApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionNames type for use +// with apply. +type CustomResourceDefinitionNamesApplyConfiguration struct { + Plural *string `json:"plural,omitempty"` + Singular *string `json:"singular,omitempty"` + ShortNames []string `json:"shortNames,omitempty"` + Kind *string `json:"kind,omitempty"` + ListKind *string `json:"listKind,omitempty"` + Categories []string `json:"categories,omitempty"` +} + +// CustomResourceDefinitionNamesApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionNames type for use with +// apply. +func CustomResourceDefinitionNames() *CustomResourceDefinitionNamesApplyConfiguration { + return &CustomResourceDefinitionNamesApplyConfiguration{} +} + +// WithPlural sets the Plural field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Plural field is set to the value of the last call. 
+func (b *CustomResourceDefinitionNamesApplyConfiguration) WithPlural(value string) *CustomResourceDefinitionNamesApplyConfiguration { + b.Plural = &value + return b +} + +// WithSingular sets the Singular field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Singular field is set to the value of the last call. +func (b *CustomResourceDefinitionNamesApplyConfiguration) WithSingular(value string) *CustomResourceDefinitionNamesApplyConfiguration { + b.Singular = &value + return b +} + +// WithShortNames adds the given value to the ShortNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ShortNames field. +func (b *CustomResourceDefinitionNamesApplyConfiguration) WithShortNames(values ...string) *CustomResourceDefinitionNamesApplyConfiguration { + for i := range values { + b.ShortNames = append(b.ShortNames, values[i]) + } + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CustomResourceDefinitionNamesApplyConfiguration) WithKind(value string) *CustomResourceDefinitionNamesApplyConfiguration { + b.Kind = &value + return b +} + +// WithListKind sets the ListKind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ListKind field is set to the value of the last call. +func (b *CustomResourceDefinitionNamesApplyConfiguration) WithListKind(value string) *CustomResourceDefinitionNamesApplyConfiguration { + b.ListKind = &value + return b +} + +// WithCategories adds the given value to the Categories field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Categories field. +func (b *CustomResourceDefinitionNamesApplyConfiguration) WithCategories(values ...string) *CustomResourceDefinitionNamesApplyConfiguration { + for i := range values { + b.Categories = append(b.Categories, values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go new file mode 100644 index 00000000..0f52e4b1 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +// CustomResourceDefinitionSpecApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionSpec type for use +// with apply. +type CustomResourceDefinitionSpecApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Names *CustomResourceDefinitionNamesApplyConfiguration `json:"names,omitempty"` + Scope *apiextensionsv1.ResourceScope `json:"scope,omitempty"` + Versions []CustomResourceDefinitionVersionApplyConfiguration `json:"versions,omitempty"` + Conversion *CustomResourceConversionApplyConfiguration `json:"conversion,omitempty"` + PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty"` +} + +// CustomResourceDefinitionSpecApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionSpec type for use with +// apply. +func CustomResourceDefinitionSpec() *CustomResourceDefinitionSpecApplyConfiguration { + return &CustomResourceDefinitionSpecApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithGroup(value string) *CustomResourceDefinitionSpecApplyConfiguration { + b.Group = &value + return b +} + +// WithNames sets the Names field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Names field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithNames(value *CustomResourceDefinitionNamesApplyConfiguration) *CustomResourceDefinitionSpecApplyConfiguration { + b.Names = value + return b +} + +// WithScope sets the Scope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scope field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithScope(value apiextensionsv1.ResourceScope) *CustomResourceDefinitionSpecApplyConfiguration { + b.Scope = &value + return b +} + +// WithVersions adds the given value to the Versions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Versions field. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithVersions(values ...*CustomResourceDefinitionVersionApplyConfiguration) *CustomResourceDefinitionSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVersions") + } + b.Versions = append(b.Versions, *values[i]) + } + return b +} + +// WithConversion sets the Conversion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Conversion field is set to the value of the last call. 
+func (b *CustomResourceDefinitionSpecApplyConfiguration) WithConversion(value *CustomResourceConversionApplyConfiguration) *CustomResourceDefinitionSpecApplyConfiguration { + b.Conversion = value + return b +} + +// WithPreserveUnknownFields sets the PreserveUnknownFields field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreserveUnknownFields field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithPreserveUnknownFields(value bool) *CustomResourceDefinitionSpecApplyConfiguration { + b.PreserveUnknownFields = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go new file mode 100644 index 00000000..a30fb726 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CustomResourceDefinitionStatusApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionStatus type for use +// with apply. +type CustomResourceDefinitionStatusApplyConfiguration struct { + Conditions []CustomResourceDefinitionConditionApplyConfiguration `json:"conditions,omitempty"` + AcceptedNames *CustomResourceDefinitionNamesApplyConfiguration `json:"acceptedNames,omitempty"` + StoredVersions []string `json:"storedVersions,omitempty"` +} + +// CustomResourceDefinitionStatusApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionStatus type for use with +// apply. +func CustomResourceDefinitionStatus() *CustomResourceDefinitionStatusApplyConfiguration { + return &CustomResourceDefinitionStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *CustomResourceDefinitionStatusApplyConfiguration) WithConditions(values ...*CustomResourceDefinitionConditionApplyConfiguration) *CustomResourceDefinitionStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithAcceptedNames sets the AcceptedNames field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
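The spec builder composes the other builders in this patch. A sketch under assumed import aliases (apiextv1ac for the applyconfiguration package above, apiextensionsv1 for k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1); the "example.dev" group and "Widget" kind are made-up values:

// exampleSpec shows chained spec-level setters; pointer-typed fields such
// as Names store the builder directly, scalar fields store a copied value.
func exampleSpec() *apiextv1ac.CustomResourceDefinitionSpecApplyConfiguration {
	return apiextv1ac.CustomResourceDefinitionSpec().
		WithGroup("example.dev").
		WithScope(apiextensionsv1.NamespaceScoped).
		WithNames(apiextv1ac.CustomResourceDefinitionNames().
			WithPlural("widgets").
			WithKind("Widget")).
		WithPreserveUnknownFields(false)
}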
+// If called multiple times, the AcceptedNames field is set to the value of the last call. +func (b *CustomResourceDefinitionStatusApplyConfiguration) WithAcceptedNames(value *CustomResourceDefinitionNamesApplyConfiguration) *CustomResourceDefinitionStatusApplyConfiguration { + b.AcceptedNames = value + return b +} + +// WithStoredVersions adds the given value to the StoredVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the StoredVersions field. +func (b *CustomResourceDefinitionStatusApplyConfiguration) WithStoredVersions(values ...string) *CustomResourceDefinitionStatusApplyConfiguration { + for i := range values { + b.StoredVersions = append(b.StoredVersions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go new file mode 100644 index 00000000..1019b03e --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CustomResourceDefinitionVersionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionVersion type for use +// with apply. +type CustomResourceDefinitionVersionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Served *bool `json:"served,omitempty"` + Storage *bool `json:"storage,omitempty"` + Deprecated *bool `json:"deprecated,omitempty"` + DeprecationWarning *string `json:"deprecationWarning,omitempty"` + Schema *CustomResourceValidationApplyConfiguration `json:"schema,omitempty"` + Subresources *CustomResourceSubresourcesApplyConfiguration `json:"subresources,omitempty"` + AdditionalPrinterColumns []CustomResourceColumnDefinitionApplyConfiguration `json:"additionalPrinterColumns,omitempty"` +} + +// CustomResourceDefinitionVersionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionVersion type for use with +// apply. +func CustomResourceDefinitionVersion() *CustomResourceDefinitionVersionApplyConfiguration { + return &CustomResourceDefinitionVersionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *CustomResourceDefinitionVersionApplyConfiguration) WithName(value string) *CustomResourceDefinitionVersionApplyConfiguration { + b.Name = &value + return b +} + +// WithServed sets the Served field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Served field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithServed(value bool) *CustomResourceDefinitionVersionApplyConfiguration { + b.Served = &value + return b +} + +// WithStorage sets the Storage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Storage field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithStorage(value bool) *CustomResourceDefinitionVersionApplyConfiguration { + b.Storage = &value + return b +} + +// WithDeprecated sets the Deprecated field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Deprecated field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithDeprecated(value bool) *CustomResourceDefinitionVersionApplyConfiguration { + b.Deprecated = &value + return b +} + +// WithDeprecationWarning sets the DeprecationWarning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeprecationWarning field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithDeprecationWarning(value string) *CustomResourceDefinitionVersionApplyConfiguration { + b.DeprecationWarning = &value + return b +} + +// WithSchema sets the Schema field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Schema field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithSchema(value *CustomResourceValidationApplyConfiguration) *CustomResourceDefinitionVersionApplyConfiguration { + b.Schema = value + return b +} + +// WithSubresources sets the Subresources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Subresources field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithSubresources(value *CustomResourceSubresourcesApplyConfiguration) *CustomResourceDefinitionVersionApplyConfiguration { + b.Subresources = value + return b +} + +// WithAdditionalPrinterColumns adds the given value to the AdditionalPrinterColumns field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalPrinterColumns field. 
+func (b *CustomResourceDefinitionVersionApplyConfiguration) WithAdditionalPrinterColumns(values ...*CustomResourceColumnDefinitionApplyConfiguration) *CustomResourceDefinitionVersionApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAdditionalPrinterColumns") + } + b.AdditionalPrinterColumns = append(b.AdditionalPrinterColumns, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go new file mode 100644 index 00000000..e91ede17 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +// CustomResourceSubresourcesApplyConfiguration represents an declarative configuration of the CustomResourceSubresources type for use +// with apply. +type CustomResourceSubresourcesApplyConfiguration struct { + Status *v1.CustomResourceSubresourceStatus `json:"status,omitempty"` + Scale *CustomResourceSubresourceScaleApplyConfiguration `json:"scale,omitempty"` +} + +// CustomResourceSubresourcesApplyConfiguration constructs an declarative configuration of the CustomResourceSubresources type for use with +// apply. +func CustomResourceSubresources() *CustomResourceSubresourcesApplyConfiguration { + return &CustomResourceSubresourcesApplyConfiguration{} +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *CustomResourceSubresourcesApplyConfiguration) WithStatus(value v1.CustomResourceSubresourceStatus) *CustomResourceSubresourcesApplyConfiguration { + b.Status = &value + return b +} + +// WithScale sets the Scale field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scale field is set to the value of the last call. 
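A sketch of the version builder just defined (same assumed aliases as the earlier sketches; the version names and deprecation text are illustrative). WithSchema takes a pointer to the CustomResourceValidation builder defined further down in this patch, while WithVersions on the spec builder appends dereferenced copies and panics on nil elements:

// exampleVersion builds a deprecated, served version with a trivial schema.
func exampleVersion() *apiextv1ac.CustomResourceDefinitionVersionApplyConfiguration {
	return apiextv1ac.CustomResourceDefinitionVersion().
		WithName("v1alpha1").
		WithServed(true).
		WithStorage(true).
		WithDeprecated(true).
		WithDeprecationWarning("example.dev/v1alpha1 Widget is deprecated; use v1").
		WithSchema(apiextv1ac.CustomResourceValidation().
			WithOpenAPIV3Schema(apiextv1ac.JSONSchemaProps().WithType("object")))
}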
+func (b *CustomResourceSubresourcesApplyConfiguration) WithScale(value *CustomResourceSubresourceScaleApplyConfiguration) *CustomResourceSubresourcesApplyConfiguration { + b.Scale = value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go new file mode 100644 index 00000000..8159cec2 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CustomResourceSubresourceScaleApplyConfiguration represents an declarative configuration of the CustomResourceSubresourceScale type for use +// with apply. +type CustomResourceSubresourceScaleApplyConfiguration struct { + SpecReplicasPath *string `json:"specReplicasPath,omitempty"` + StatusReplicasPath *string `json:"statusReplicasPath,omitempty"` + LabelSelectorPath *string `json:"labelSelectorPath,omitempty"` +} + +// CustomResourceSubresourceScaleApplyConfiguration constructs an declarative configuration of the CustomResourceSubresourceScale type for use with +// apply. +func CustomResourceSubresourceScale() *CustomResourceSubresourceScaleApplyConfiguration { + return &CustomResourceSubresourceScaleApplyConfiguration{} +} + +// WithSpecReplicasPath sets the SpecReplicasPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SpecReplicasPath field is set to the value of the last call. +func (b *CustomResourceSubresourceScaleApplyConfiguration) WithSpecReplicasPath(value string) *CustomResourceSubresourceScaleApplyConfiguration { + b.SpecReplicasPath = &value + return b +} + +// WithStatusReplicasPath sets the StatusReplicasPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StatusReplicasPath field is set to the value of the last call. +func (b *CustomResourceSubresourceScaleApplyConfiguration) WithStatusReplicasPath(value string) *CustomResourceSubresourceScaleApplyConfiguration { + b.StatusReplicasPath = &value + return b +} + +// WithLabelSelectorPath sets the LabelSelectorPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelSelectorPath field is set to the value of the last call. 
+func (b *CustomResourceSubresourceScaleApplyConfiguration) WithLabelSelectorPath(value string) *CustomResourceSubresourceScaleApplyConfiguration { + b.LabelSelectorPath = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go new file mode 100644 index 00000000..2e0bcbcb --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CustomResourceValidationApplyConfiguration represents an declarative configuration of the CustomResourceValidation type for use +// with apply. +type CustomResourceValidationApplyConfiguration struct { + OpenAPIV3Schema *JSONSchemaPropsApplyConfiguration `json:"openAPIV3Schema,omitempty"` +} + +// CustomResourceValidationApplyConfiguration constructs an declarative configuration of the CustomResourceValidation type for use with +// apply. +func CustomResourceValidation() *CustomResourceValidationApplyConfiguration { + return &CustomResourceValidationApplyConfiguration{} +} + +// WithOpenAPIV3Schema sets the OpenAPIV3Schema field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpenAPIV3Schema field is set to the value of the last call. +func (b *CustomResourceValidationApplyConfiguration) WithOpenAPIV3Schema(value *JSONSchemaPropsApplyConfiguration) *CustomResourceValidationApplyConfiguration { + b.OpenAPIV3Schema = value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go new file mode 100644 index 00000000..61856a15 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
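The subresources builders above mix apply configurations with raw API types: Scale takes the scale builder, while Status takes the plain apiextensions v1 type. A sketch with the same assumed aliases and illustrative JSONPaths:

// exampleSubresources enables the status and scale subresources.
func exampleSubresources() *apiextv1ac.CustomResourceSubresourcesApplyConfiguration {
	return apiextv1ac.CustomResourceSubresources().
		// CustomResourceSubresourceStatus is an empty marker struct from the
		// API package, so it is passed by value rather than built.
		WithStatus(apiextensionsv1.CustomResourceSubresourceStatus{}).
		WithScale(apiextv1ac.CustomResourceSubresourceScale().
			WithSpecReplicasPath(".spec.replicas").
			WithStatusReplicasPath(".status.replicas").
			WithLabelSelectorPath(".status.selector"))
}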
+ +package v1 + +// ExternalDocumentationApplyConfiguration represents an declarative configuration of the ExternalDocumentation type for use +// with apply. +type ExternalDocumentationApplyConfiguration struct { + Description *string `json:"description,omitempty"` + URL *string `json:"url,omitempty"` +} + +// ExternalDocumentationApplyConfiguration constructs an declarative configuration of the ExternalDocumentation type for use with +// apply. +func ExternalDocumentation() *ExternalDocumentationApplyConfiguration { + return &ExternalDocumentationApplyConfiguration{} +} + +// WithDescription sets the Description field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Description field is set to the value of the last call. +func (b *ExternalDocumentationApplyConfiguration) WithDescription(value string) *ExternalDocumentationApplyConfiguration { + b.Description = &value + return b +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *ExternalDocumentationApplyConfiguration) WithURL(value string) *ExternalDocumentationApplyConfiguration { + b.URL = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go new file mode 100644 index 00000000..730203ad --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go @@ -0,0 +1,463 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +// JSONSchemaPropsApplyConfiguration represents an declarative configuration of the JSONSchemaProps type for use +// with apply. 
+type JSONSchemaPropsApplyConfiguration struct { + ID *string `json:"id,omitempty"` + Schema *v1.JSONSchemaURL `json:"$schema,omitempty"` + Ref *string `json:"$ref,omitempty"` + Description *string `json:"description,omitempty"` + Type *string `json:"type,omitempty"` + Format *string `json:"format,omitempty"` + Title *string `json:"title,omitempty"` + Default *v1.JSON `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum *bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern *string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems *bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []v1.JSON `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *v1.JSONSchemaPropsOrArray `json:"items,omitempty"` + AllOf []JSONSchemaPropsApplyConfiguration `json:"allOf,omitempty"` + OneOf []JSONSchemaPropsApplyConfiguration `json:"oneOf,omitempty"` + AnyOf []JSONSchemaPropsApplyConfiguration `json:"anyOf,omitempty"` + Not *JSONSchemaPropsApplyConfiguration `json:"not,omitempty"` + Properties map[string]JSONSchemaPropsApplyConfiguration `json:"properties,omitempty"` + AdditionalProperties *v1.JSONSchemaPropsOrBool `json:"additionalProperties,omitempty"` + PatternProperties map[string]JSONSchemaPropsApplyConfiguration `json:"patternProperties,omitempty"` + Dependencies *v1.JSONSchemaDependencies `json:"dependencies,omitempty"` + AdditionalItems *v1.JSONSchemaPropsOrBool `json:"additionalItems,omitempty"` + Definitions *v1.JSONSchemaDefinitions `json:"definitions,omitempty"` + ExternalDocs *ExternalDocumentationApplyConfiguration `json:"externalDocs,omitempty"` + Example *v1.JSON `json:"example,omitempty"` + Nullable *bool `json:"nullable,omitempty"` + XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty"` + XEmbeddedResource *bool `json:"x-kubernetes-embedded-resource,omitempty"` + XIntOrString *bool `json:"x-kubernetes-int-or-string,omitempty"` + XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty"` + XListType *string `json:"x-kubernetes-list-type,omitempty"` + XMapType *string `json:"x-kubernetes-map-type,omitempty"` + XValidations *v1.ValidationRules `json:"x-kubernetes-validations,omitempty"` +} + +// JSONSchemaPropsApplyConfiguration constructs an declarative configuration of the JSONSchemaProps type for use with +// apply. +func JSONSchemaProps() *JSONSchemaPropsApplyConfiguration { + return &JSONSchemaPropsApplyConfiguration{} +} + +// WithID sets the ID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ID field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithID(value string) *JSONSchemaPropsApplyConfiguration { + b.ID = &value + return b +} + +// WithSchema sets the Schema field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Schema field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithSchema(value v1.JSONSchemaURL) *JSONSchemaPropsApplyConfiguration { + b.Schema = &value + return b +} + +// WithRef sets the Ref field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ref field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithRef(value string) *JSONSchemaPropsApplyConfiguration { + b.Ref = &value + return b +} + +// WithDescription sets the Description field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Description field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithDescription(value string) *JSONSchemaPropsApplyConfiguration { + b.Description = &value + return b +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithType(value string) *JSONSchemaPropsApplyConfiguration { + b.Type = &value + return b +} + +// WithFormat sets the Format field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Format field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithFormat(value string) *JSONSchemaPropsApplyConfiguration { + b.Format = &value + return b +} + +// WithTitle sets the Title field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Title field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithTitle(value string) *JSONSchemaPropsApplyConfiguration { + b.Title = &value + return b +} + +// WithDefault sets the Default field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Default field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithDefault(value v1.JSON) *JSONSchemaPropsApplyConfiguration { + b.Default = &value + return b +} + +// WithMaximum sets the Maximum field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Maximum field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMaximum(value float64) *JSONSchemaPropsApplyConfiguration { + b.Maximum = &value + return b +} + +// WithExclusiveMaximum sets the ExclusiveMaximum field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExclusiveMaximum field is set to the value of the last call. 
+func (b *JSONSchemaPropsApplyConfiguration) WithExclusiveMaximum(value bool) *JSONSchemaPropsApplyConfiguration { + b.ExclusiveMaximum = &value + return b +} + +// WithMinimum sets the Minimum field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Minimum field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMinimum(value float64) *JSONSchemaPropsApplyConfiguration { + b.Minimum = &value + return b +} + +// WithExclusiveMinimum sets the ExclusiveMinimum field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExclusiveMinimum field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithExclusiveMinimum(value bool) *JSONSchemaPropsApplyConfiguration { + b.ExclusiveMinimum = &value + return b +} + +// WithMaxLength sets the MaxLength field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxLength field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMaxLength(value int64) *JSONSchemaPropsApplyConfiguration { + b.MaxLength = &value + return b +} + +// WithMinLength sets the MinLength field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinLength field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMinLength(value int64) *JSONSchemaPropsApplyConfiguration { + b.MinLength = &value + return b +} + +// WithPattern sets the Pattern field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pattern field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithPattern(value string) *JSONSchemaPropsApplyConfiguration { + b.Pattern = &value + return b +} + +// WithMaxItems sets the MaxItems field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxItems field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMaxItems(value int64) *JSONSchemaPropsApplyConfiguration { + b.MaxItems = &value + return b +} + +// WithMinItems sets the MinItems field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinItems field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMinItems(value int64) *JSONSchemaPropsApplyConfiguration { + b.MinItems = &value + return b +} + +// WithUniqueItems sets the UniqueItems field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UniqueItems field is set to the value of the last call. 
+func (b *JSONSchemaPropsApplyConfiguration) WithUniqueItems(value bool) *JSONSchemaPropsApplyConfiguration { + b.UniqueItems = &value + return b +} + +// WithMultipleOf sets the MultipleOf field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MultipleOf field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMultipleOf(value float64) *JSONSchemaPropsApplyConfiguration { + b.MultipleOf = &value + return b +} + +// WithEnum adds the given value to the Enum field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Enum field. +func (b *JSONSchemaPropsApplyConfiguration) WithEnum(values ...v1.JSON) *JSONSchemaPropsApplyConfiguration { + for i := range values { + b.Enum = append(b.Enum, values[i]) + } + return b +} + +// WithMaxProperties sets the MaxProperties field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxProperties field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMaxProperties(value int64) *JSONSchemaPropsApplyConfiguration { + b.MaxProperties = &value + return b +} + +// WithMinProperties sets the MinProperties field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinProperties field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMinProperties(value int64) *JSONSchemaPropsApplyConfiguration { + b.MinProperties = &value + return b +} + +// WithRequired adds the given value to the Required field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Required field. +func (b *JSONSchemaPropsApplyConfiguration) WithRequired(values ...string) *JSONSchemaPropsApplyConfiguration { + for i := range values { + b.Required = append(b.Required, values[i]) + } + return b +} + +// WithItems sets the Items field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Items field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithItems(value v1.JSONSchemaPropsOrArray) *JSONSchemaPropsApplyConfiguration { + b.Items = &value + return b +} + +// WithAllOf adds the given value to the AllOf field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllOf field. 
+func (b *JSONSchemaPropsApplyConfiguration) WithAllOf(values ...*JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAllOf") + } + b.AllOf = append(b.AllOf, *values[i]) + } + return b +} + +// WithOneOf adds the given value to the OneOf field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OneOf field. +func (b *JSONSchemaPropsApplyConfiguration) WithOneOf(values ...*JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOneOf") + } + b.OneOf = append(b.OneOf, *values[i]) + } + return b +} + +// WithAnyOf adds the given value to the AnyOf field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AnyOf field. +func (b *JSONSchemaPropsApplyConfiguration) WithAnyOf(values ...*JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAnyOf") + } + b.AnyOf = append(b.AnyOf, *values[i]) + } + return b +} + +// WithNot sets the Not field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Not field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithNot(value *JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + b.Not = value + return b +} + +// WithProperties puts the entries into the Properties field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Properties field, +// overwriting an existing map entries in Properties field with the same key. +func (b *JSONSchemaPropsApplyConfiguration) WithProperties(entries map[string]JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + if b.Properties == nil && len(entries) > 0 { + b.Properties = make(map[string]JSONSchemaPropsApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.Properties[k] = v + } + return b +} + +// WithAdditionalProperties sets the AdditionalProperties field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalProperties field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalProperties(value v1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { + b.AdditionalProperties = &value + return b +} + +// WithPatternProperties puts the entries into the PatternProperties field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the PatternProperties field, +// overwriting an existing map entries in PatternProperties field with the same key. 
+func (b *JSONSchemaPropsApplyConfiguration) WithPatternProperties(entries map[string]JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + if b.PatternProperties == nil && len(entries) > 0 { + b.PatternProperties = make(map[string]JSONSchemaPropsApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.PatternProperties[k] = v + } + return b +} + +// WithDependencies sets the Dependencies field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Dependencies field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value v1.JSONSchemaDependencies) *JSONSchemaPropsApplyConfiguration { + b.Dependencies = &value + return b +} + +// WithAdditionalItems sets the AdditionalItems field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalItems field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value v1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { + b.AdditionalItems = &value + return b +} + +// WithDefinitions sets the Definitions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Definitions field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithDefinitions(value v1.JSONSchemaDefinitions) *JSONSchemaPropsApplyConfiguration { + b.Definitions = &value + return b +} + +// WithExternalDocs sets the ExternalDocs field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExternalDocs field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithExternalDocs(value *ExternalDocumentationApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + b.ExternalDocs = value + return b +} + +// WithExample sets the Example field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Example field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithExample(value v1.JSON) *JSONSchemaPropsApplyConfiguration { + b.Example = &value + return b +} + +// WithNullable sets the Nullable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Nullable field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithNullable(value bool) *JSONSchemaPropsApplyConfiguration { + b.Nullable = &value + return b +} + +// WithXPreserveUnknownFields sets the XPreserveUnknownFields field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XPreserveUnknownFields field is set to the value of the last call. 
+func (b *JSONSchemaPropsApplyConfiguration) WithXPreserveUnknownFields(value bool) *JSONSchemaPropsApplyConfiguration { + b.XPreserveUnknownFields = &value + return b +} + +// WithXEmbeddedResource sets the XEmbeddedResource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XEmbeddedResource field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithXEmbeddedResource(value bool) *JSONSchemaPropsApplyConfiguration { + b.XEmbeddedResource = &value + return b +} + +// WithXIntOrString sets the XIntOrString field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XIntOrString field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithXIntOrString(value bool) *JSONSchemaPropsApplyConfiguration { + b.XIntOrString = &value + return b +} + +// WithXListMapKeys adds the given value to the XListMapKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the XListMapKeys field. +func (b *JSONSchemaPropsApplyConfiguration) WithXListMapKeys(values ...string) *JSONSchemaPropsApplyConfiguration { + for i := range values { + b.XListMapKeys = append(b.XListMapKeys, values[i]) + } + return b +} + +// WithXListType sets the XListType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XListType field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithXListType(value string) *JSONSchemaPropsApplyConfiguration { + b.XListType = &value + return b +} + +// WithXMapType sets the XMapType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XMapType field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithXMapType(value string) *JSONSchemaPropsApplyConfiguration { + b.XMapType = &value + return b +} + +// WithXValidations sets the XValidations field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XValidations field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithXValidations(value v1.ValidationRules) *JSONSchemaPropsApplyConfiguration { + b.XValidations = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go new file mode 100644 index 00000000..2cd55d9e --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
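Because the JSONSchemaProps builder is recursive, nested schemas are themselves builders; WithProperties copies the map entries by value, so nested builders are dereferenced when placed in the map. A sketch (same assumed aliases; the "replicas" property is an illustrative field name):

// exampleSchema wraps a small object schema in a CustomResourceValidation.
func exampleSchema() *apiextv1ac.CustomResourceValidationApplyConfiguration {
	replicas := apiextv1ac.JSONSchemaProps().
		WithType("integer").
		WithMinimum(1)

	return apiextv1ac.CustomResourceValidation().
		WithOpenAPIV3Schema(apiextv1ac.JSONSchemaProps().
			WithType("object").
			WithRequired("replicas").
			WithProperties(map[string]apiextv1ac.JSONSchemaPropsApplyConfiguration{
				"replicas": *replicas,
			}))
}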
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// with apply. +type ServiceReferenceApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + Path *string `json:"path,omitempty"` + Port *int32 `json:"port,omitempty"` +} + +// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// apply. +func ServiceReference() *ServiceReferenceApplyConfiguration { + return &ServiceReferenceApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceReferenceApplyConfiguration) WithNamespace(value string) *ServiceReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceReferenceApplyConfiguration) WithName(value string) *ServiceReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. +func (b *ServiceReferenceApplyConfiguration) WithPath(value string) *ServiceReferenceApplyConfiguration { + b.Path = &value + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *ServiceReferenceApplyConfiguration) WithPort(value int32) *ServiceReferenceApplyConfiguration { + b.Port = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go new file mode 100644 index 00000000..81107764 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ValidationRuleApplyConfiguration represents an declarative configuration of the ValidationRule type for use +// with apply. +type ValidationRuleApplyConfiguration struct { + Rule *string `json:"rule,omitempty"` + Message *string `json:"message,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` +} + +// ValidationRuleApplyConfiguration constructs an declarative configuration of the ValidationRule type for use with +// apply. +func ValidationRule() *ValidationRuleApplyConfiguration { + return &ValidationRuleApplyConfiguration{} +} + +// WithRule sets the Rule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Rule field is set to the value of the last call. +func (b *ValidationRuleApplyConfiguration) WithRule(value string) *ValidationRuleApplyConfiguration { + b.Rule = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *ValidationRuleApplyConfiguration) WithMessage(value string) *ValidationRuleApplyConfiguration { + b.Message = &value + return b +} + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationRuleApplyConfiguration) WithMessageExpression(value string) *ValidationRuleApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go new file mode 100644 index 00000000..aa358ae2 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
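Note that WithXValidations on the JSONSchemaProps builder shown earlier takes the raw apiextensionsv1.ValidationRules slice rather than the ValidationRule apply configuration defined above. A sketch with the same assumed aliases and an illustrative CEL rule:

// exampleCELValidation attaches an x-kubernetes-validations rule to a schema.
func exampleCELValidation() *apiextv1ac.JSONSchemaPropsApplyConfiguration {
	return apiextv1ac.JSONSchemaProps().
		WithType("object").
		WithXValidations(apiextensionsv1.ValidationRules{{
			Rule:    "self.replicas >= self.minReplicas",
			Message: "replicas must not be below minReplicas",
		}})
}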
+ +package v1 + +// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// with apply. +type WebhookClientConfigApplyConfiguration struct { + URL *string `json:"url,omitempty"` + Service *ServiceReferenceApplyConfiguration `json:"service,omitempty"` + CABundle []byte `json:"caBundle,omitempty"` +} + +// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// apply. +func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { + return &WebhookClientConfigApplyConfiguration{} +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *WebhookClientConfigApplyConfiguration) WithURL(value string) *WebhookClientConfigApplyConfiguration { + b.URL = &value + return b +} + +// WithService sets the Service field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Service field is set to the value of the last call. +func (b *WebhookClientConfigApplyConfiguration) WithService(value *ServiceReferenceApplyConfiguration) *WebhookClientConfigApplyConfiguration { + b.Service = value + return b +} + +// WithCABundle adds the given value to the CABundle field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CABundle field. +func (b *WebhookClientConfigApplyConfiguration) WithCABundle(values ...byte) *WebhookClientConfigApplyConfiguration { + for i := range values { + b.CABundle = append(b.CABundle, values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go new file mode 100644 index 00000000..2af1b70b --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// WebhookConversionApplyConfiguration represents an declarative configuration of the WebhookConversion type for use +// with apply. 
+type WebhookConversionApplyConfiguration struct { + ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"` + ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty"` +} + +// WebhookConversionApplyConfiguration constructs an declarative configuration of the WebhookConversion type for use with +// apply. +func WebhookConversion() *WebhookConversionApplyConfiguration { + return &WebhookConversionApplyConfiguration{} +} + +// WithClientConfig sets the ClientConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientConfig field is set to the value of the last call. +func (b *WebhookConversionApplyConfiguration) WithClientConfig(value *WebhookClientConfigApplyConfiguration) *WebhookConversionApplyConfiguration { + b.ClientConfig = value + return b +} + +// WithConversionReviewVersions adds the given value to the ConversionReviewVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ConversionReviewVersions field. +func (b *WebhookConversionApplyConfiguration) WithConversionReviewVersions(values ...string) *WebhookConversionApplyConfiguration { + for i := range values { + b.ConversionReviewVersions = append(b.ConversionReviewVersions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go new file mode 100644 index 00000000..e4731784 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go @@ -0,0 +1,84 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// CustomResourceColumnDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceColumnDefinition type for use +// with apply. +type CustomResourceColumnDefinitionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Format *string `json:"format,omitempty"` + Description *string `json:"description,omitempty"` + Priority *int32 `json:"priority,omitempty"` + JSONPath *string `json:"JSONPath,omitempty"` +} + +// CustomResourceColumnDefinitionApplyConfiguration constructs an declarative configuration of the CustomResourceColumnDefinition type for use with +// apply. 
+func CustomResourceColumnDefinition() *CustomResourceColumnDefinitionApplyConfiguration { + return &CustomResourceColumnDefinitionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithName(value string) *CustomResourceColumnDefinitionApplyConfiguration { + b.Name = &value + return b +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithType(value string) *CustomResourceColumnDefinitionApplyConfiguration { + b.Type = &value + return b +} + +// WithFormat sets the Format field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Format field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithFormat(value string) *CustomResourceColumnDefinitionApplyConfiguration { + b.Format = &value + return b +} + +// WithDescription sets the Description field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Description field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithDescription(value string) *CustomResourceColumnDefinitionApplyConfiguration { + b.Description = &value + return b +} + +// WithPriority sets the Priority field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Priority field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithPriority(value int32) *CustomResourceColumnDefinitionApplyConfiguration { + b.Priority = &value + return b +} + +// WithJSONPath sets the JSONPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JSONPath field is set to the value of the last call. +func (b *CustomResourceColumnDefinitionApplyConfiguration) WithJSONPath(value string) *CustomResourceColumnDefinitionApplyConfiguration { + b.JSONPath = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go new file mode 100644 index 00000000..d9825f85 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" +) + +// CustomResourceConversionApplyConfiguration represents an declarative configuration of the CustomResourceConversion type for use +// with apply. +type CustomResourceConversionApplyConfiguration struct { + Strategy *v1beta1.ConversionStrategyType `json:"strategy,omitempty"` + WebhookClientConfig *WebhookClientConfigApplyConfiguration `json:"webhookClientConfig,omitempty"` + ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty"` +} + +// CustomResourceConversionApplyConfiguration constructs an declarative configuration of the CustomResourceConversion type for use with +// apply. +func CustomResourceConversion() *CustomResourceConversionApplyConfiguration { + return &CustomResourceConversionApplyConfiguration{} +} + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *CustomResourceConversionApplyConfiguration) WithStrategy(value v1beta1.ConversionStrategyType) *CustomResourceConversionApplyConfiguration { + b.Strategy = &value + return b +} + +// WithWebhookClientConfig sets the WebhookClientConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WebhookClientConfig field is set to the value of the last call. +func (b *CustomResourceConversionApplyConfiguration) WithWebhookClientConfig(value *WebhookClientConfigApplyConfiguration) *CustomResourceConversionApplyConfiguration { + b.WebhookClientConfig = value + return b +} + +// WithConversionReviewVersions adds the given value to the ConversionReviewVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ConversionReviewVersions field. +func (b *CustomResourceConversionApplyConfiguration) WithConversionReviewVersions(values ...string) *CustomResourceConversionApplyConfiguration { + for i := range values { + b.ConversionReviewVersions = append(b.ConversionReviewVersions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go new file mode 100644 index 00000000..9117748c --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go @@ -0,0 +1,218 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CustomResourceDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceDefinition type for use +// with apply. +type CustomResourceDefinitionApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CustomResourceDefinitionSpecApplyConfiguration `json:"spec,omitempty"` + Status *CustomResourceDefinitionStatusApplyConfiguration `json:"status,omitempty"` +} + +// CustomResourceDefinition constructs an declarative configuration of the CustomResourceDefinition type for use with +// apply. +func CustomResourceDefinition(name string) *CustomResourceDefinitionApplyConfiguration { + b := &CustomResourceDefinitionApplyConfiguration{} + b.WithName(name) + b.WithKind("CustomResourceDefinition") + b.WithAPIVersion("apiextensions.k8s.io/v1beta1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithKind(value string) *CustomResourceDefinitionApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithAPIVersion(value string) *CustomResourceDefinitionApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithName(value string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *CustomResourceDefinitionApplyConfiguration) WithGenerateName(value string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithNamespace(value string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithUID(value types.UID) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithResourceVersion(value string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithGeneration(value int64) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *CustomResourceDefinitionApplyConfiguration) WithLabels(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *CustomResourceDefinitionApplyConfiguration) WithAnnotations(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *CustomResourceDefinitionApplyConfiguration) WithFinalizers(values ...string) *CustomResourceDefinitionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *CustomResourceDefinitionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Spec field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithSpec(value *CustomResourceDefinitionSpecApplyConfiguration) *CustomResourceDefinitionApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *CustomResourceDefinitionApplyConfiguration) WithStatus(value *CustomResourceDefinitionStatusApplyConfiguration) *CustomResourceDefinitionApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go new file mode 100644 index 00000000..cf2400c9 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CustomResourceDefinitionConditionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionCondition type for use +// with apply. +type CustomResourceDefinitionConditionApplyConfiguration struct { + Type *v1beta1.CustomResourceDefinitionConditionType `json:"type,omitempty"` + Status *v1beta1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// CustomResourceDefinitionConditionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionCondition type for use with +// apply. +func CustomResourceDefinitionCondition() *CustomResourceDefinitionConditionApplyConfiguration { + return &CustomResourceDefinitionConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value v1beta1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithStatus(value v1beta1.ConditionStatus) *CustomResourceDefinitionConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *CustomResourceDefinitionConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithReason(value string) *CustomResourceDefinitionConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithMessage(value string) *CustomResourceDefinitionConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go new file mode 100644 index 00000000..a2020072 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// CustomResourceDefinitionNamesApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionNames type for use +// with apply. 
+type CustomResourceDefinitionNamesApplyConfiguration struct { + Plural *string `json:"plural,omitempty"` + Singular *string `json:"singular,omitempty"` + ShortNames []string `json:"shortNames,omitempty"` + Kind *string `json:"kind,omitempty"` + ListKind *string `json:"listKind,omitempty"` + Categories []string `json:"categories,omitempty"` +} + +// CustomResourceDefinitionNamesApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionNames type for use with +// apply. +func CustomResourceDefinitionNames() *CustomResourceDefinitionNamesApplyConfiguration { + return &CustomResourceDefinitionNamesApplyConfiguration{} +} + +// WithPlural sets the Plural field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Plural field is set to the value of the last call. +func (b *CustomResourceDefinitionNamesApplyConfiguration) WithPlural(value string) *CustomResourceDefinitionNamesApplyConfiguration { + b.Plural = &value + return b +} + +// WithSingular sets the Singular field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Singular field is set to the value of the last call. +func (b *CustomResourceDefinitionNamesApplyConfiguration) WithSingular(value string) *CustomResourceDefinitionNamesApplyConfiguration { + b.Singular = &value + return b +} + +// WithShortNames adds the given value to the ShortNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ShortNames field. +func (b *CustomResourceDefinitionNamesApplyConfiguration) WithShortNames(values ...string) *CustomResourceDefinitionNamesApplyConfiguration { + for i := range values { + b.ShortNames = append(b.ShortNames, values[i]) + } + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CustomResourceDefinitionNamesApplyConfiguration) WithKind(value string) *CustomResourceDefinitionNamesApplyConfiguration { + b.Kind = &value + return b +} + +// WithListKind sets the ListKind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ListKind field is set to the value of the last call. +func (b *CustomResourceDefinitionNamesApplyConfiguration) WithListKind(value string) *CustomResourceDefinitionNamesApplyConfiguration { + b.ListKind = &value + return b +} + +// WithCategories adds the given value to the Categories field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Categories field. 
+func (b *CustomResourceDefinitionNamesApplyConfiguration) WithCategories(values ...string) *CustomResourceDefinitionNamesApplyConfiguration { + for i := range values { + b.Categories = append(b.Categories, values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go new file mode 100644 index 00000000..f8c29037 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go @@ -0,0 +1,134 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" +) + +// CustomResourceDefinitionSpecApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionSpec type for use +// with apply. +type CustomResourceDefinitionSpecApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Version *string `json:"version,omitempty"` + Names *CustomResourceDefinitionNamesApplyConfiguration `json:"names,omitempty"` + Scope *apiextensionsv1beta1.ResourceScope `json:"scope,omitempty"` + Validation *CustomResourceValidationApplyConfiguration `json:"validation,omitempty"` + Subresources *CustomResourceSubresourcesApplyConfiguration `json:"subresources,omitempty"` + Versions []CustomResourceDefinitionVersionApplyConfiguration `json:"versions,omitempty"` + AdditionalPrinterColumns []CustomResourceColumnDefinitionApplyConfiguration `json:"additionalPrinterColumns,omitempty"` + Conversion *CustomResourceConversionApplyConfiguration `json:"conversion,omitempty"` + PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty"` +} + +// CustomResourceDefinitionSpecApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionSpec type for use with +// apply. +func CustomResourceDefinitionSpec() *CustomResourceDefinitionSpecApplyConfiguration { + return &CustomResourceDefinitionSpecApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithGroup(value string) *CustomResourceDefinitionSpecApplyConfiguration { + b.Group = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. 
+func (b *CustomResourceDefinitionSpecApplyConfiguration) WithVersion(value string) *CustomResourceDefinitionSpecApplyConfiguration { + b.Version = &value + return b +} + +// WithNames sets the Names field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Names field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithNames(value *CustomResourceDefinitionNamesApplyConfiguration) *CustomResourceDefinitionSpecApplyConfiguration { + b.Names = value + return b +} + +// WithScope sets the Scope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scope field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithScope(value apiextensionsv1beta1.ResourceScope) *CustomResourceDefinitionSpecApplyConfiguration { + b.Scope = &value + return b +} + +// WithValidation sets the Validation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Validation field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithValidation(value *CustomResourceValidationApplyConfiguration) *CustomResourceDefinitionSpecApplyConfiguration { + b.Validation = value + return b +} + +// WithSubresources sets the Subresources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Subresources field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithSubresources(value *CustomResourceSubresourcesApplyConfiguration) *CustomResourceDefinitionSpecApplyConfiguration { + b.Subresources = value + return b +} + +// WithVersions adds the given value to the Versions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Versions field. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithVersions(values ...*CustomResourceDefinitionVersionApplyConfiguration) *CustomResourceDefinitionSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVersions") + } + b.Versions = append(b.Versions, *values[i]) + } + return b +} + +// WithAdditionalPrinterColumns adds the given value to the AdditionalPrinterColumns field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalPrinterColumns field. 
+func (b *CustomResourceDefinitionSpecApplyConfiguration) WithAdditionalPrinterColumns(values ...*CustomResourceColumnDefinitionApplyConfiguration) *CustomResourceDefinitionSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAdditionalPrinterColumns") + } + b.AdditionalPrinterColumns = append(b.AdditionalPrinterColumns, *values[i]) + } + return b +} + +// WithConversion sets the Conversion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Conversion field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithConversion(value *CustomResourceConversionApplyConfiguration) *CustomResourceDefinitionSpecApplyConfiguration { + b.Conversion = value + return b +} + +// WithPreserveUnknownFields sets the PreserveUnknownFields field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreserveUnknownFields field is set to the value of the last call. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithPreserveUnknownFields(value bool) *CustomResourceDefinitionSpecApplyConfiguration { + b.PreserveUnknownFields = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go new file mode 100644 index 00000000..79b2ebda --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// CustomResourceDefinitionStatusApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionStatus type for use +// with apply. +type CustomResourceDefinitionStatusApplyConfiguration struct { + Conditions []CustomResourceDefinitionConditionApplyConfiguration `json:"conditions,omitempty"` + AcceptedNames *CustomResourceDefinitionNamesApplyConfiguration `json:"acceptedNames,omitempty"` + StoredVersions []string `json:"storedVersions,omitempty"` +} + +// CustomResourceDefinitionStatusApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionStatus type for use with +// apply. +func CustomResourceDefinitionStatus() *CustomResourceDefinitionStatusApplyConfiguration { + return &CustomResourceDefinitionStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *CustomResourceDefinitionStatusApplyConfiguration) WithConditions(values ...*CustomResourceDefinitionConditionApplyConfiguration) *CustomResourceDefinitionStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithAcceptedNames sets the AcceptedNames field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AcceptedNames field is set to the value of the last call. +func (b *CustomResourceDefinitionStatusApplyConfiguration) WithAcceptedNames(value *CustomResourceDefinitionNamesApplyConfiguration) *CustomResourceDefinitionStatusApplyConfiguration { + b.AcceptedNames = value + return b +} + +// WithStoredVersions adds the given value to the StoredVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the StoredVersions field. +func (b *CustomResourceDefinitionStatusApplyConfiguration) WithStoredVersions(values ...string) *CustomResourceDefinitionStatusApplyConfiguration { + for i := range values { + b.StoredVersions = append(b.StoredVersions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go new file mode 100644 index 00000000..605a9f0a --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// CustomResourceDefinitionVersionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionVersion type for use +// with apply. 
+type CustomResourceDefinitionVersionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Served *bool `json:"served,omitempty"` + Storage *bool `json:"storage,omitempty"` + Deprecated *bool `json:"deprecated,omitempty"` + DeprecationWarning *string `json:"deprecationWarning,omitempty"` + Schema *CustomResourceValidationApplyConfiguration `json:"schema,omitempty"` + Subresources *CustomResourceSubresourcesApplyConfiguration `json:"subresources,omitempty"` + AdditionalPrinterColumns []CustomResourceColumnDefinitionApplyConfiguration `json:"additionalPrinterColumns,omitempty"` +} + +// CustomResourceDefinitionVersionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionVersion type for use with +// apply. +func CustomResourceDefinitionVersion() *CustomResourceDefinitionVersionApplyConfiguration { + return &CustomResourceDefinitionVersionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithName(value string) *CustomResourceDefinitionVersionApplyConfiguration { + b.Name = &value + return b +} + +// WithServed sets the Served field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Served field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithServed(value bool) *CustomResourceDefinitionVersionApplyConfiguration { + b.Served = &value + return b +} + +// WithStorage sets the Storage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Storage field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithStorage(value bool) *CustomResourceDefinitionVersionApplyConfiguration { + b.Storage = &value + return b +} + +// WithDeprecated sets the Deprecated field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Deprecated field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithDeprecated(value bool) *CustomResourceDefinitionVersionApplyConfiguration { + b.Deprecated = &value + return b +} + +// WithDeprecationWarning sets the DeprecationWarning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeprecationWarning field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithDeprecationWarning(value string) *CustomResourceDefinitionVersionApplyConfiguration { + b.DeprecationWarning = &value + return b +} + +// WithSchema sets the Schema field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Schema field is set to the value of the last call. 
+func (b *CustomResourceDefinitionVersionApplyConfiguration) WithSchema(value *CustomResourceValidationApplyConfiguration) *CustomResourceDefinitionVersionApplyConfiguration { + b.Schema = value + return b +} + +// WithSubresources sets the Subresources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Subresources field is set to the value of the last call. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithSubresources(value *CustomResourceSubresourcesApplyConfiguration) *CustomResourceDefinitionVersionApplyConfiguration { + b.Subresources = value + return b +} + +// WithAdditionalPrinterColumns adds the given value to the AdditionalPrinterColumns field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalPrinterColumns field. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithAdditionalPrinterColumns(values ...*CustomResourceColumnDefinitionApplyConfiguration) *CustomResourceDefinitionVersionApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAdditionalPrinterColumns") + } + b.AdditionalPrinterColumns = append(b.AdditionalPrinterColumns, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go new file mode 100644 index 00000000..a62f8a20 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" +) + +// CustomResourceSubresourcesApplyConfiguration represents an declarative configuration of the CustomResourceSubresources type for use +// with apply. +type CustomResourceSubresourcesApplyConfiguration struct { + Status *v1beta1.CustomResourceSubresourceStatus `json:"status,omitempty"` + Scale *CustomResourceSubresourceScaleApplyConfiguration `json:"scale,omitempty"` +} + +// CustomResourceSubresourcesApplyConfiguration constructs an declarative configuration of the CustomResourceSubresources type for use with +// apply. +func CustomResourceSubresources() *CustomResourceSubresourcesApplyConfiguration { + return &CustomResourceSubresourcesApplyConfiguration{} +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Status field is set to the value of the last call. +func (b *CustomResourceSubresourcesApplyConfiguration) WithStatus(value v1beta1.CustomResourceSubresourceStatus) *CustomResourceSubresourcesApplyConfiguration { + b.Status = &value + return b +} + +// WithScale sets the Scale field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scale field is set to the value of the last call. +func (b *CustomResourceSubresourcesApplyConfiguration) WithScale(value *CustomResourceSubresourceScaleApplyConfiguration) *CustomResourceSubresourcesApplyConfiguration { + b.Scale = value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go new file mode 100644 index 00000000..72934ce9 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// CustomResourceSubresourceScaleApplyConfiguration represents an declarative configuration of the CustomResourceSubresourceScale type for use +// with apply. +type CustomResourceSubresourceScaleApplyConfiguration struct { + SpecReplicasPath *string `json:"specReplicasPath,omitempty"` + StatusReplicasPath *string `json:"statusReplicasPath,omitempty"` + LabelSelectorPath *string `json:"labelSelectorPath,omitempty"` +} + +// CustomResourceSubresourceScaleApplyConfiguration constructs an declarative configuration of the CustomResourceSubresourceScale type for use with +// apply. +func CustomResourceSubresourceScale() *CustomResourceSubresourceScaleApplyConfiguration { + return &CustomResourceSubresourceScaleApplyConfiguration{} +} + +// WithSpecReplicasPath sets the SpecReplicasPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SpecReplicasPath field is set to the value of the last call. +func (b *CustomResourceSubresourceScaleApplyConfiguration) WithSpecReplicasPath(value string) *CustomResourceSubresourceScaleApplyConfiguration { + b.SpecReplicasPath = &value + return b +} + +// WithStatusReplicasPath sets the StatusReplicasPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StatusReplicasPath field is set to the value of the last call. 
+func (b *CustomResourceSubresourceScaleApplyConfiguration) WithStatusReplicasPath(value string) *CustomResourceSubresourceScaleApplyConfiguration { + b.StatusReplicasPath = &value + return b +} + +// WithLabelSelectorPath sets the LabelSelectorPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelSelectorPath field is set to the value of the last call. +func (b *CustomResourceSubresourceScaleApplyConfiguration) WithLabelSelectorPath(value string) *CustomResourceSubresourceScaleApplyConfiguration { + b.LabelSelectorPath = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go new file mode 100644 index 00000000..9f65653d --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// CustomResourceValidationApplyConfiguration represents an declarative configuration of the CustomResourceValidation type for use +// with apply. +type CustomResourceValidationApplyConfiguration struct { + OpenAPIV3Schema *JSONSchemaPropsApplyConfiguration `json:"openAPIV3Schema,omitempty"` +} + +// CustomResourceValidationApplyConfiguration constructs an declarative configuration of the CustomResourceValidation type for use with +// apply. +func CustomResourceValidation() *CustomResourceValidationApplyConfiguration { + return &CustomResourceValidationApplyConfiguration{} +} + +// WithOpenAPIV3Schema sets the OpenAPIV3Schema field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpenAPIV3Schema field is set to the value of the last call. +func (b *CustomResourceValidationApplyConfiguration) WithOpenAPIV3Schema(value *JSONSchemaPropsApplyConfiguration) *CustomResourceValidationApplyConfiguration { + b.OpenAPIV3Schema = value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go new file mode 100644 index 00000000..360f6d96 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ExternalDocumentationApplyConfiguration represents an declarative configuration of the ExternalDocumentation type for use +// with apply. +type ExternalDocumentationApplyConfiguration struct { + Description *string `json:"description,omitempty"` + URL *string `json:"url,omitempty"` +} + +// ExternalDocumentationApplyConfiguration constructs an declarative configuration of the ExternalDocumentation type for use with +// apply. +func ExternalDocumentation() *ExternalDocumentationApplyConfiguration { + return &ExternalDocumentationApplyConfiguration{} +} + +// WithDescription sets the Description field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Description field is set to the value of the last call. +func (b *ExternalDocumentationApplyConfiguration) WithDescription(value string) *ExternalDocumentationApplyConfiguration { + b.Description = &value + return b +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *ExternalDocumentationApplyConfiguration) WithURL(value string) *ExternalDocumentationApplyConfiguration { + b.URL = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go new file mode 100644 index 00000000..158b5750 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go @@ -0,0 +1,463 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" +) + +// JSONSchemaPropsApplyConfiguration represents an declarative configuration of the JSONSchemaProps type for use +// with apply. 
+type JSONSchemaPropsApplyConfiguration struct { + ID *string `json:"id,omitempty"` + Schema *v1beta1.JSONSchemaURL `json:"$schema,omitempty"` + Ref *string `json:"$ref,omitempty"` + Description *string `json:"description,omitempty"` + Type *string `json:"type,omitempty"` + Format *string `json:"format,omitempty"` + Title *string `json:"title,omitempty"` + Default *v1beta1.JSON `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum *bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern *string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems *bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []v1beta1.JSON `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *v1beta1.JSONSchemaPropsOrArray `json:"items,omitempty"` + AllOf []JSONSchemaPropsApplyConfiguration `json:"allOf,omitempty"` + OneOf []JSONSchemaPropsApplyConfiguration `json:"oneOf,omitempty"` + AnyOf []JSONSchemaPropsApplyConfiguration `json:"anyOf,omitempty"` + Not *JSONSchemaPropsApplyConfiguration `json:"not,omitempty"` + Properties map[string]JSONSchemaPropsApplyConfiguration `json:"properties,omitempty"` + AdditionalProperties *v1beta1.JSONSchemaPropsOrBool `json:"additionalProperties,omitempty"` + PatternProperties map[string]JSONSchemaPropsApplyConfiguration `json:"patternProperties,omitempty"` + Dependencies *v1beta1.JSONSchemaDependencies `json:"dependencies,omitempty"` + AdditionalItems *v1beta1.JSONSchemaPropsOrBool `json:"additionalItems,omitempty"` + Definitions *v1beta1.JSONSchemaDefinitions `json:"definitions,omitempty"` + ExternalDocs *ExternalDocumentationApplyConfiguration `json:"externalDocs,omitempty"` + Example *v1beta1.JSON `json:"example,omitempty"` + Nullable *bool `json:"nullable,omitempty"` + XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty"` + XEmbeddedResource *bool `json:"x-kubernetes-embedded-resource,omitempty"` + XIntOrString *bool `json:"x-kubernetes-int-or-string,omitempty"` + XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty"` + XListType *string `json:"x-kubernetes-list-type,omitempty"` + XMapType *string `json:"x-kubernetes-map-type,omitempty"` + XValidations *v1beta1.ValidationRules `json:"x-kubernetes-validations,omitempty"` +} + +// JSONSchemaPropsApplyConfiguration constructs an declarative configuration of the JSONSchemaProps type for use with +// apply. +func JSONSchemaProps() *JSONSchemaPropsApplyConfiguration { + return &JSONSchemaPropsApplyConfiguration{} +} + +// WithID sets the ID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ID field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithID(value string) *JSONSchemaPropsApplyConfiguration { + b.ID = &value + return b +} + +// WithSchema sets the Schema field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Schema field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithSchema(value v1beta1.JSONSchemaURL) *JSONSchemaPropsApplyConfiguration { + b.Schema = &value + return b +} + +// WithRef sets the Ref field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ref field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithRef(value string) *JSONSchemaPropsApplyConfiguration { + b.Ref = &value + return b +} + +// WithDescription sets the Description field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Description field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithDescription(value string) *JSONSchemaPropsApplyConfiguration { + b.Description = &value + return b +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithType(value string) *JSONSchemaPropsApplyConfiguration { + b.Type = &value + return b +} + +// WithFormat sets the Format field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Format field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithFormat(value string) *JSONSchemaPropsApplyConfiguration { + b.Format = &value + return b +} + +// WithTitle sets the Title field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Title field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithTitle(value string) *JSONSchemaPropsApplyConfiguration { + b.Title = &value + return b +} + +// WithDefault sets the Default field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Default field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithDefault(value v1beta1.JSON) *JSONSchemaPropsApplyConfiguration { + b.Default = &value + return b +} + +// WithMaximum sets the Maximum field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Maximum field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMaximum(value float64) *JSONSchemaPropsApplyConfiguration { + b.Maximum = &value + return b +} + +// WithExclusiveMaximum sets the ExclusiveMaximum field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExclusiveMaximum field is set to the value of the last call. 
+func (b *JSONSchemaPropsApplyConfiguration) WithExclusiveMaximum(value bool) *JSONSchemaPropsApplyConfiguration { + b.ExclusiveMaximum = &value + return b +} + +// WithMinimum sets the Minimum field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Minimum field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMinimum(value float64) *JSONSchemaPropsApplyConfiguration { + b.Minimum = &value + return b +} + +// WithExclusiveMinimum sets the ExclusiveMinimum field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExclusiveMinimum field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithExclusiveMinimum(value bool) *JSONSchemaPropsApplyConfiguration { + b.ExclusiveMinimum = &value + return b +} + +// WithMaxLength sets the MaxLength field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxLength field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMaxLength(value int64) *JSONSchemaPropsApplyConfiguration { + b.MaxLength = &value + return b +} + +// WithMinLength sets the MinLength field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinLength field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMinLength(value int64) *JSONSchemaPropsApplyConfiguration { + b.MinLength = &value + return b +} + +// WithPattern sets the Pattern field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pattern field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithPattern(value string) *JSONSchemaPropsApplyConfiguration { + b.Pattern = &value + return b +} + +// WithMaxItems sets the MaxItems field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxItems field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMaxItems(value int64) *JSONSchemaPropsApplyConfiguration { + b.MaxItems = &value + return b +} + +// WithMinItems sets the MinItems field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinItems field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMinItems(value int64) *JSONSchemaPropsApplyConfiguration { + b.MinItems = &value + return b +} + +// WithUniqueItems sets the UniqueItems field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UniqueItems field is set to the value of the last call. 
+func (b *JSONSchemaPropsApplyConfiguration) WithUniqueItems(value bool) *JSONSchemaPropsApplyConfiguration { + b.UniqueItems = &value + return b +} + +// WithMultipleOf sets the MultipleOf field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MultipleOf field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMultipleOf(value float64) *JSONSchemaPropsApplyConfiguration { + b.MultipleOf = &value + return b +} + +// WithEnum adds the given value to the Enum field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Enum field. +func (b *JSONSchemaPropsApplyConfiguration) WithEnum(values ...v1beta1.JSON) *JSONSchemaPropsApplyConfiguration { + for i := range values { + b.Enum = append(b.Enum, values[i]) + } + return b +} + +// WithMaxProperties sets the MaxProperties field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxProperties field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMaxProperties(value int64) *JSONSchemaPropsApplyConfiguration { + b.MaxProperties = &value + return b +} + +// WithMinProperties sets the MinProperties field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinProperties field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithMinProperties(value int64) *JSONSchemaPropsApplyConfiguration { + b.MinProperties = &value + return b +} + +// WithRequired adds the given value to the Required field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Required field. +func (b *JSONSchemaPropsApplyConfiguration) WithRequired(values ...string) *JSONSchemaPropsApplyConfiguration { + for i := range values { + b.Required = append(b.Required, values[i]) + } + return b +} + +// WithItems sets the Items field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Items field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithItems(value v1beta1.JSONSchemaPropsOrArray) *JSONSchemaPropsApplyConfiguration { + b.Items = &value + return b +} + +// WithAllOf adds the given value to the AllOf field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllOf field. 
+func (b *JSONSchemaPropsApplyConfiguration) WithAllOf(values ...*JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAllOf") + } + b.AllOf = append(b.AllOf, *values[i]) + } + return b +} + +// WithOneOf adds the given value to the OneOf field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OneOf field. +func (b *JSONSchemaPropsApplyConfiguration) WithOneOf(values ...*JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOneOf") + } + b.OneOf = append(b.OneOf, *values[i]) + } + return b +} + +// WithAnyOf adds the given value to the AnyOf field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AnyOf field. +func (b *JSONSchemaPropsApplyConfiguration) WithAnyOf(values ...*JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAnyOf") + } + b.AnyOf = append(b.AnyOf, *values[i]) + } + return b +} + +// WithNot sets the Not field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Not field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithNot(value *JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + b.Not = value + return b +} + +// WithProperties puts the entries into the Properties field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Properties field, +// overwriting an existing map entries in Properties field with the same key. +func (b *JSONSchemaPropsApplyConfiguration) WithProperties(entries map[string]JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + if b.Properties == nil && len(entries) > 0 { + b.Properties = make(map[string]JSONSchemaPropsApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.Properties[k] = v + } + return b +} + +// WithAdditionalProperties sets the AdditionalProperties field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalProperties field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalProperties(value v1beta1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { + b.AdditionalProperties = &value + return b +} + +// WithPatternProperties puts the entries into the PatternProperties field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the PatternProperties field, +// overwriting an existing map entries in PatternProperties field with the same key. 
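// Illustrative sketch: composing a small OpenAPI v3 schema with the generated JSONSchemaProps
// builder above and wrapping it in a CustomResourceValidation. The package alias and the
// property name are assumptions made for this example.
package main

import (
	"fmt"

	applyv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1"
)

func main() {
	// An object schema with one required integer property, "replicas", with a minimum of 1.
	schema := applyv1beta1.JSONSchemaProps().
		WithType("object").
		WithRequired("replicas").
		WithProperties(map[string]applyv1beta1.JSONSchemaPropsApplyConfiguration{
			"replicas": *applyv1beta1.JSONSchemaProps().
				WithType("integer").
				WithMinimum(1),
		})

	// The schema slots into the validation wrapper defined earlier in this file set.
	validation := applyv1beta1.CustomResourceValidation().WithOpenAPIV3Schema(schema)
	fmt.Println(*validation.OpenAPIV3Schema.Type) // prints "object"
}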
+func (b *JSONSchemaPropsApplyConfiguration) WithPatternProperties(entries map[string]JSONSchemaPropsApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + if b.PatternProperties == nil && len(entries) > 0 { + b.PatternProperties = make(map[string]JSONSchemaPropsApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.PatternProperties[k] = v + } + return b +} + +// WithDependencies sets the Dependencies field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Dependencies field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value v1beta1.JSONSchemaDependencies) *JSONSchemaPropsApplyConfiguration { + b.Dependencies = &value + return b +} + +// WithAdditionalItems sets the AdditionalItems field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalItems field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value v1beta1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { + b.AdditionalItems = &value + return b +} + +// WithDefinitions sets the Definitions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Definitions field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithDefinitions(value v1beta1.JSONSchemaDefinitions) *JSONSchemaPropsApplyConfiguration { + b.Definitions = &value + return b +} + +// WithExternalDocs sets the ExternalDocs field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExternalDocs field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithExternalDocs(value *ExternalDocumentationApplyConfiguration) *JSONSchemaPropsApplyConfiguration { + b.ExternalDocs = value + return b +} + +// WithExample sets the Example field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Example field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithExample(value v1beta1.JSON) *JSONSchemaPropsApplyConfiguration { + b.Example = &value + return b +} + +// WithNullable sets the Nullable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Nullable field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithNullable(value bool) *JSONSchemaPropsApplyConfiguration { + b.Nullable = &value + return b +} + +// WithXPreserveUnknownFields sets the XPreserveUnknownFields field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XPreserveUnknownFields field is set to the value of the last call. 
+func (b *JSONSchemaPropsApplyConfiguration) WithXPreserveUnknownFields(value bool) *JSONSchemaPropsApplyConfiguration { + b.XPreserveUnknownFields = &value + return b +} + +// WithXEmbeddedResource sets the XEmbeddedResource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XEmbeddedResource field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithXEmbeddedResource(value bool) *JSONSchemaPropsApplyConfiguration { + b.XEmbeddedResource = &value + return b +} + +// WithXIntOrString sets the XIntOrString field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XIntOrString field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithXIntOrString(value bool) *JSONSchemaPropsApplyConfiguration { + b.XIntOrString = &value + return b +} + +// WithXListMapKeys adds the given value to the XListMapKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the XListMapKeys field. +func (b *JSONSchemaPropsApplyConfiguration) WithXListMapKeys(values ...string) *JSONSchemaPropsApplyConfiguration { + for i := range values { + b.XListMapKeys = append(b.XListMapKeys, values[i]) + } + return b +} + +// WithXListType sets the XListType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XListType field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithXListType(value string) *JSONSchemaPropsApplyConfiguration { + b.XListType = &value + return b +} + +// WithXMapType sets the XMapType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XMapType field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithXMapType(value string) *JSONSchemaPropsApplyConfiguration { + b.XMapType = &value + return b +} + +// WithXValidations sets the XValidations field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XValidations field is set to the value of the last call. +func (b *JSONSchemaPropsApplyConfiguration) WithXValidations(value v1beta1.ValidationRules) *JSONSchemaPropsApplyConfiguration { + b.XValidations = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go new file mode 100644 index 00000000..c21b5749 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// with apply. +type ServiceReferenceApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + Path *string `json:"path,omitempty"` + Port *int32 `json:"port,omitempty"` +} + +// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// apply. +func ServiceReference() *ServiceReferenceApplyConfiguration { + return &ServiceReferenceApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceReferenceApplyConfiguration) WithNamespace(value string) *ServiceReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceReferenceApplyConfiguration) WithName(value string) *ServiceReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. +func (b *ServiceReferenceApplyConfiguration) WithPath(value string) *ServiceReferenceApplyConfiguration { + b.Path = &value + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *ServiceReferenceApplyConfiguration) WithPort(value int32) *ServiceReferenceApplyConfiguration { + b.Port = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go new file mode 100644 index 00000000..de836e12 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ValidationRuleApplyConfiguration represents an declarative configuration of the ValidationRule type for use +// with apply. +type ValidationRuleApplyConfiguration struct { + Rule *string `json:"rule,omitempty"` + Message *string `json:"message,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` +} + +// ValidationRuleApplyConfiguration constructs an declarative configuration of the ValidationRule type for use with +// apply. +func ValidationRule() *ValidationRuleApplyConfiguration { + return &ValidationRuleApplyConfiguration{} +} + +// WithRule sets the Rule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Rule field is set to the value of the last call. +func (b *ValidationRuleApplyConfiguration) WithRule(value string) *ValidationRuleApplyConfiguration { + b.Rule = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *ValidationRuleApplyConfiguration) WithMessage(value string) *ValidationRuleApplyConfiguration { + b.Message = &value + return b +} + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationRuleApplyConfiguration) WithMessageExpression(value string) *ValidationRuleApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go new file mode 100644 index 00000000..490f9d5f --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// with apply. +type WebhookClientConfigApplyConfiguration struct { + URL *string `json:"url,omitempty"` + Service *ServiceReferenceApplyConfiguration `json:"service,omitempty"` + CABundle []byte `json:"caBundle,omitempty"` +} + +// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// apply. +func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { + return &WebhookClientConfigApplyConfiguration{} +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *WebhookClientConfigApplyConfiguration) WithURL(value string) *WebhookClientConfigApplyConfiguration { + b.URL = &value + return b +} + +// WithService sets the Service field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Service field is set to the value of the last call. +func (b *WebhookClientConfigApplyConfiguration) WithService(value *ServiceReferenceApplyConfiguration) *WebhookClientConfigApplyConfiguration { + b.Service = value + return b +} + +// WithCABundle adds the given value to the CABundle field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CABundle field. 
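// Illustrative sketch: pointing a CRD conversion webhook at an in-cluster Service with the
// builders in this hunk. The namespace, service name, path, and placeholder CA bundle are
// assumptions made for this example.
package main

import (
	"fmt"

	applyv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1"
)

func main() {
	caBundle := []byte("-----BEGIN CERTIFICATE-----\n...") // placeholder, not a real certificate

	clientConfig := applyv1beta1.WebhookClientConfig().
		WithService(applyv1beta1.ServiceReference().
			WithNamespace("crd-system").
			WithName("conversion-webhook").
			WithPath("/convert").
			WithPort(443)).
		WithCABundle(caBundle...) // WithCABundle appends the given bytes

	fmt.Println(*clientConfig.Service.Name) // prints "conversion-webhook"
}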
+func (b *WebhookClientConfigApplyConfiguration) WithCABundle(values ...byte) *WebhookClientConfigApplyConfiguration { + for i := range values { + b.CABundle = append(b.CABundle, values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go index 5569b12d..3949426c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go @@ -20,9 +20,12 @@ package v1 import ( "context" + json "encoding/json" + "fmt" "time" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1" scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -47,6 +50,8 @@ type CustomResourceDefinitionInterface interface { List(ctx context.Context, opts metav1.ListOptions) (*v1.CustomResourceDefinitionList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) + Apply(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) + ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) CustomResourceDefinitionExpansion } @@ -182,3 +187,57 @@ func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt t Into(result) return } + +// Apply takes the given apply declarative configuration, applies it and returns the applied customResourceDefinition. +func (c *customResourceDefinitions) Apply(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) { + if customResourceDefinition == nil { + return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(customResourceDefinition) + if err != nil { + return nil, err + } + name := customResourceDefinition.Name + if name == nil { + return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") + } + result = &v1.CustomResourceDefinition{} + err = c.client.Patch(types.ApplyPatchType). + Resource("customresourcedefinitions"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *customResourceDefinitions) ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) { + if customResourceDefinition == nil { + return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(customResourceDefinition) + if err != nil { + return nil, err + } + + name := customResourceDefinition.Name + if name == nil { + return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") + } + + result = &v1.CustomResourceDefinition{} + err = c.client.Patch(types.ApplyPatchType). + Resource("customresourcedefinitions"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go index 2d16ca70..0e5b482a 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go @@ -20,9 +20,12 @@ package v1beta1 import ( "context" + json "encoding/json" + "fmt" "time" v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1" scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -47,6 +50,8 @@ type CustomResourceDefinitionInterface interface { List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CustomResourceDefinitionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) + Apply(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) + ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) CustomResourceDefinitionExpansion } @@ -182,3 +187,57 @@ func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt t Into(result) return } + +// Apply takes the given apply declarative configuration, applies it and returns the applied customResourceDefinition. 
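// Illustrative sketch of driving the new server-side Apply method from the generated clientset.
// The CustomResourceDefinition(name) constructor (generated alongside these builders), the CRD
// name, and the field-manager string are assumptions for this example; Apply itself requires the
// apply configuration to carry a non-nil Name.
package crdapply

import (
	"context"

	applyv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1"
	clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func applyCRD(ctx context.Context, cfg *rest.Config) error {
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		return err
	}

	// Only the fields set on the apply configuration are claimed by this field manager;
	// the rest of the live object is left untouched by the apply patch.
	crd := applyv1beta1.CustomResourceDefinition("widgets.example.com") // assumed generated constructor

	_, err = cs.ApiextensionsV1beta1().CustomResourceDefinitions().
		Apply(ctx, crd, metav1.ApplyOptions{FieldManager: "example-controller", Force: true})
	return err
}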
+func (c *customResourceDefinitions) Apply(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) { + if customResourceDefinition == nil { + return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(customResourceDefinition) + if err != nil { + return nil, err + } + name := customResourceDefinition.Name + if name == nil { + return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") + } + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Patch(types.ApplyPatchType). + Resource("customresourcedefinitions"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *customResourceDefinitions) ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) { + if customResourceDefinition == nil { + return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(customResourceDefinition) + if err != nil { + return nil, err + } + + name := customResourceDefinition.Name + if name == nil { + return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") + } + + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Patch(types.ApplyPatchType). + Resource("customresourcedefinitions"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 899d3e8a..1bf6b06d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -40,8 +40,7 @@ var ( // IsListType returns true if the provided Object has a slice called Items. // TODO: Replace the code in this check with an interface comparison by -// -// creating and enforcing that lists implement a list accessor. +// creating and enforcing that lists implement a list accessor. func IsListType(obj runtime.Object) bool { switch t := obj.(type) { case runtime.Unstructured: diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go new file mode 100644 index 00000000..29c6a48b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go @@ -0,0 +1,38 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internalversion + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// SetListOptionsDefaults sets defaults on the provided ListOptions if applicable. +// +// TODO(#115478): once the watch-list fg is always on we register this function in the scheme (via AddTypeDefaultingFunc). +// TODO(#115478): when the function is registered in the scheme remove all callers of this method. +func SetListOptionsDefaults(obj *ListOptions, isWatchListFeatureEnabled bool) { + if !isWatchListFeatureEnabled { + return + } + if obj.SendInitialEvents != nil || len(obj.ResourceVersionMatch) != 0 { + return + } + legacy := obj.ResourceVersion == "" || obj.ResourceVersion == "0" + if obj.Watch && legacy { + turnOnInitialEvents := true + obj.SendInitialEvents = &turnOnInitialEvents + obj.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index a49b5f2b..00d2b8c6 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -66,6 +66,31 @@ type ListOptions struct { // it does not recognize and will return a 410 error if the token can no longer be used because // it has expired. Continue string + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. 
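// Illustrative sketch of the client-side shape of the watch-list request described above, using
// the mirrored SendInitialEvents field added to metav1.ListOptions in the apimachinery version
// vendored here. Watch-list behaviour depends on the API server's WatchList feature being
// enabled; the choice of the apiextensions clientset is an assumption for this example.
package watchlist

import (
	"context"
	"fmt"

	clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func watchCRDs(ctx context.Context, cfg *rest.Config) error {
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		return err
	}

	sendInitialEvents := true
	w, err := cs.ApiextensionsV1().CustomResourceDefinitions().Watch(ctx, metav1.ListOptions{
		Watch: true,
		// Ask the server to replay the current state as synthetic events, followed by a
		// bookmark, before streaming live changes.
		SendInitialEvents:    &sendInitialEvents,
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
		// Allow bookmarks so the event marking the end of the initial state can be observed.
		AllowWatchBookmarks: true,
	})
	if err != nil {
		return err
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		fmt.Println(ev.Type, ev.Object.GetObjectKind().GroupVersionKind())
	}
	return nil
}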
+ SendInitialEvents *bool } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index 6d212b84..a6552c27 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -115,6 +115,7 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue + out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents)) return nil } @@ -137,6 +138,7 @@ func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOption out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue + out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents)) return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index 6e1eac5c..af66a2ac 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -75,6 +75,11 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { *out = new(int64) **out = **in } + if in.SendInitialEvents != nil { + in, out := &in.SendInitialEvents, &out.SendInitialEvents + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 7e00eb7d..1a641e7c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -1326,185 +1326,187 @@ func init() { } var fileDescriptor_cf52fa777ced5367 = []byte{ - // 2842 bytes of a gzipped FileDescriptorProto + // 2867 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47, 0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44, 0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d, 0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a, 0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88, - 0x0b, 0xdc, 0x41, 0x22, 0xc7, 0x20, 0x2e, 0x91, 0x40, 0xa3, 0xc4, 0x1c, 0x38, 0x22, 0xae, 0xbe, - 0x80, 0xea, 0xd1, 0xdd, 0xd5, 0xf3, 0x58, 0xf7, 0x64, 0x97, 0x88, 0xdb, 0xf4, 0xf7, 0xae, 0xaa, - 0xaf, 0xbe, 0x47, 0x7d, 0x03, 0x3b, 0xc7, 0xd7, 0x58, 0xdd, 0xf1, 0xd7, 0x8f, 0x7b, 0x07, 0x84, - 0x7a, 0x24, 0x20, 0x6c, 0xfd, 0x84, 0x78, 0xb6, 0x4f, 0xd7, 0x15, 0xc2, 0xea, 0x3a, 0x1d, 0xab, - 0x75, 0xe4, 0x78, 0x84, 0xf6, 0xd7, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0xeb, 0x1d, 0x12, 0x58, 0xeb, - 0x27, 0x57, 0xd6, 0xdb, 0xc4, 0x23, 0xd4, 0x0a, 0x88, 0x5d, 0xef, 0x52, 0x3f, 0xf0, 0xd1, 0x63, - 0x92, 0xab, 0xae, 0x73, 0xd5, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0x75, 0xce, 0x55, 0x3f, 0xb9, 0xb2, - 0xf2, 0x54, 0xdb, 0x09, 0x8e, 0x7a, 0x07, 0xf5, 0x96, 0xdf, 0x59, 0x6f, 0xfb, 0x6d, 0x7f, 0x5d, - 0x30, 0x1f, 0xf4, 0x0e, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 0xd0, 0x95, 
0x89, 0xa6, 0xd0, 0x9e, - 0x17, 0x38, 0x1d, 0x32, 0x6c, 0xc5, 0xca, 0xb3, 0xe7, 0x31, 0xb0, 0xd6, 0x11, 0xe9, 0x58, 0xc3, - 0x7c, 0xe6, 0x9f, 0xb3, 0x50, 0xd8, 0xd8, 0xdb, 0xbe, 0x49, 0xfd, 0x5e, 0x17, 0xad, 0x41, 0xce, - 0xb3, 0x3a, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x8b, 0x8d, 0xf2, 0x07, 0x83, 0xda, 0xcc, 0xe9, 0xa0, - 0x96, 0x7b, 0xd5, 0xea, 0x10, 0x2c, 0x30, 0xc8, 0x85, 0xc2, 0x09, 0xa1, 0xcc, 0xf1, 0x3d, 0x56, - 0xcd, 0xac, 0x65, 0x2f, 0x97, 0xae, 0xbe, 0x58, 0x4f, 0xb3, 0xfe, 0xba, 0x50, 0x70, 0x57, 0xb2, - 0x6e, 0xf9, 0xb4, 0xe9, 0xb0, 0x96, 0x7f, 0x42, 0x68, 0xbf, 0xb1, 0xa8, 0xb4, 0x14, 0x14, 0x92, - 0xe1, 0x48, 0x03, 0xfa, 0x91, 0x01, 0x8b, 0x5d, 0x4a, 0x0e, 0x09, 0xa5, 0xc4, 0x56, 0xf8, 0x6a, - 0x76, 0xcd, 0x78, 0x08, 0x6a, 0xab, 0x4a, 0xed, 0xe2, 0xde, 0x90, 0x7c, 0x3c, 0xa2, 0x11, 0xfd, - 0xda, 0x80, 0x15, 0x46, 0xe8, 0x09, 0xa1, 0x1b, 0xb6, 0x4d, 0x09, 0x63, 0x8d, 0xfe, 0xa6, 0xeb, - 0x10, 0x2f, 0xd8, 0xdc, 0x6e, 0x62, 0x56, 0xcd, 0x89, 0x7d, 0xf8, 0x7a, 0x3a, 0x83, 0xf6, 0x27, - 0xc9, 0x69, 0x98, 0xca, 0xa2, 0x95, 0x89, 0x24, 0x0c, 0xdf, 0xc7, 0x0c, 0xf3, 0x10, 0xca, 0xe1, - 0x41, 0xde, 0x72, 0x58, 0x80, 0xee, 0xc2, 0x6c, 0x9b, 0x7f, 0xb0, 0xaa, 0x21, 0x0c, 0xac, 0xa7, - 0x33, 0x30, 0x94, 0xd1, 0x98, 0x57, 0xf6, 0xcc, 0x8a, 0x4f, 0x86, 0x95, 0x34, 0xf3, 0x67, 0x39, - 0x28, 0x6d, 0xec, 0x6d, 0x63, 0xc2, 0xfc, 0x1e, 0x6d, 0x91, 0x14, 0x4e, 0x73, 0x0d, 0xca, 0xcc, - 0xf1, 0xda, 0x3d, 0xd7, 0xa2, 0x1c, 0x5a, 0x9d, 0x15, 0x94, 0xcb, 0x8a, 0xb2, 0xbc, 0xaf, 0xe1, - 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xd7, 0x6a, 0x11, 0xbb, 0x9a, 0x59, 0x33, 0x2e, - 0x17, 0x1a, 0x48, 0xf1, 0xc1, 0xab, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x51, 0xc8, 0x0b, 0x4b, 0xab, - 0x05, 0xa1, 0xa6, 0xa2, 0xc8, 0xf3, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x13, 0x30, 0xa7, 0xbc, 0xac, - 0x5a, 0x14, 0x64, 0x0b, 0x8a, 0x6c, 0x2e, 0x74, 0x83, 0x10, 0xcf, 0xd7, 0x77, 0xec, 0x78, 0xb6, - 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x38, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, 0xfc, 0x09, 0xa1, 0x07, - 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0x8d, 0x22, 0x37, 0x4d, 0xfc, - 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x9f, 0x06, 0x62, 0x79, 0xd5, 0xfc, 0x5a, 0xf6, 0x72, - 0xb1, 0x31, 0xcf, 0xd7, 0xbb, 0x1f, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x5b, 0x56, 0x40, 0xda, 0x3e, - 0x75, 0x08, 0xab, 0xce, 0xc5, 0xf4, 0x9b, 0x11, 0x14, 0x6b, 0x14, 0xe8, 0x65, 0x40, 0x2c, 0xf0, - 0xa9, 0xd5, 0x26, 0x6a, 0xa9, 0x2f, 0x59, 0xec, 0xa8, 0x0a, 0x62, 0x75, 0x2b, 0x6a, 0x75, 0x68, - 0x7f, 0x84, 0x02, 0x8f, 0xe1, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef, 0xae, 0x41, - 0xb9, 0xad, 0xdd, 0x3a, 0xe5, 0x17, 0xd1, 0x69, 0xeb, 0x37, 0x12, 0x27, 0x28, 0x11, 0x81, 0x22, - 0x55, 0x92, 0xc2, 0xe8, 0x72, 0x25, 0xb5, 0xd3, 0x86, 0x36, 0xc4, 0x9a, 0x34, 0x20, 0xc3, 0xb1, - 0x64, 0xf3, 0x9f, 0x86, 0x70, 0xe0, 0x30, 0xde, 0xa0, 0xcb, 0x5a, 0x4c, 0x33, 0xc4, 0xf6, 0x95, - 0x27, 0xc4, 0xa3, 0x73, 0x02, 0x41, 0xe6, 0xff, 0x22, 0x10, 0x5c, 0x2f, 0xfc, 0xf2, 0xbd, 0xda, - 0xcc, 0xdb, 0x7f, 0x5f, 0x9b, 0x31, 0x7f, 0x61, 0x40, 0x79, 0xa3, 0xdb, 0x75, 0xfb, 0xbb, 0xdd, - 0x40, 0x2c, 0xc0, 0x84, 0x59, 0x9b, 0xf6, 0x71, 0xcf, 0x53, 0x0b, 0x05, 0x7e, 0xbf, 0x9b, 0x02, - 0x82, 0x15, 0x86, 0xdf, 0x9f, 0x43, 0x9f, 0xb6, 0x88, 0xba, 0x6e, 0xd1, 0xfd, 0xd9, 0xe2, 0x40, - 0x2c, 0x71, 0xfc, 0x90, 0x0f, 0x1d, 0xe2, 0xda, 0x3b, 0x96, 0x67, 0xb5, 0x09, 0x55, 0x97, 0x23, - 0xda, 0xfa, 0x2d, 0x0d, 0x87, 0x13, 0x94, 0xe6, 0x7f, 0x32, 0x50, 0xdc, 0xf4, 0x3d, 0xdb, 0x09, - 0xd4, 0xe5, 0x0a, 0xfa, 0xdd, 0x91, 0xe0, 0x71, 0xbb, 0xdf, 0x25, 0x58, 0x60, 0xd0, 0x73, 0x30, - 
0xcb, 0x02, 0x2b, 0xe8, 0x31, 0x61, 0x4f, 0xb1, 0xf1, 0x48, 0x18, 0x96, 0xf6, 0x05, 0xf4, 0x6c, - 0x50, 0x5b, 0x88, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x03, 0xb1, 0x51, 0xf6, 0x4d, - 0x99, 0xf6, 0xc2, 0xfc, 0x91, 0x8d, 0x3d, 0x7d, 0x77, 0x84, 0x02, 0x8f, 0xe1, 0x42, 0x27, 0x80, - 0x5c, 0x8b, 0x05, 0xb7, 0xa9, 0xe5, 0x31, 0xa1, 0xeb, 0xb6, 0xd3, 0x21, 0xea, 0xc2, 0x7f, 0x29, - 0xdd, 0x89, 0x73, 0x8e, 0x58, 0xef, 0xad, 0x11, 0x69, 0x78, 0x8c, 0x06, 0xf4, 0x38, 0xcc, 0x52, - 0x62, 0x31, 0xdf, 0xab, 0xe6, 0xc5, 0xf2, 0xa3, 0xa8, 0x8c, 0x05, 0x14, 0x2b, 0x2c, 0x0f, 0x68, - 0x1d, 0xc2, 0x98, 0xd5, 0x0e, 0xc3, 0x6b, 0x14, 0xd0, 0x76, 0x24, 0x18, 0x87, 0x78, 0xf3, 0xb7, - 0x06, 0x54, 0x36, 0x29, 0xb1, 0x02, 0x32, 0x8d, 0x5b, 0x7c, 0xea, 0x13, 0x47, 0x1b, 0xb0, 0x20, - 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0xc8, 0x09, 0xe6, 0xcf, 0x2b, 0xe6, 0x85, 0xad, 0x24, - 0x1a, 0x0f, 0xd3, 0x9b, 0x3f, 0xc9, 0x42, 0xa5, 0x49, 0x5c, 0x12, 0x9b, 0xbc, 0x05, 0xa8, 0x4d, - 0xad, 0x16, 0xd9, 0x23, 0xd4, 0xf1, 0xed, 0x7d, 0xd2, 0xf2, 0x3d, 0x9b, 0x09, 0x37, 0xca, 0x36, - 0x3e, 0xc7, 0xf7, 0xf7, 0xe6, 0x08, 0x16, 0x8f, 0xe1, 0x40, 0x2e, 0x54, 0xba, 0x54, 0xfc, 0x16, - 0x7b, 0x2e, 0xbd, 0xac, 0x74, 0xf5, 0x2b, 0xe9, 0x8e, 0x74, 0x4f, 0x67, 0x6d, 0x2c, 0x9d, 0x0e, - 0x6a, 0x95, 0x04, 0x08, 0x27, 0x85, 0xa3, 0x6f, 0xc0, 0xa2, 0x4f, 0xbb, 0x47, 0x96, 0xd7, 0x24, - 0x5d, 0xe2, 0xd9, 0xc4, 0x0b, 0x98, 0xd8, 0xc8, 0x42, 0x63, 0x99, 0xd7, 0x22, 0xbb, 0x43, 0x38, - 0x3c, 0x42, 0x8d, 0x5e, 0x83, 0xa5, 0x2e, 0xf5, 0xbb, 0x56, 0x5b, 0x6c, 0xcc, 0x9e, 0xef, 0x3a, - 0xad, 0xbe, 0xda, 0xce, 0x27, 0x4f, 0x07, 0xb5, 0xa5, 0xbd, 0x61, 0xe4, 0xd9, 0xa0, 0x76, 0x41, - 0x6c, 0x1d, 0x87, 0xc4, 0x48, 0x3c, 0x2a, 0x46, 0x73, 0x83, 0xfc, 0x24, 0x37, 0x30, 0xb7, 0xa1, - 0xd0, 0xec, 0xa9, 0x3b, 0xf1, 0x02, 0x14, 0x6c, 0xf5, 0x5b, 0xed, 0x7c, 0x78, 0x39, 0x23, 0x9a, - 0xb3, 0x41, 0xad, 0xc2, 0xcb, 0xcf, 0x7a, 0x08, 0xc0, 0x11, 0x8b, 0xf9, 0x38, 0x14, 0xc4, 0xc1, - 0xb3, 0xbb, 0x57, 0xd0, 0x22, 0x64, 0xb1, 0x75, 0x4f, 0x48, 0x29, 0x63, 0xfe, 0x53, 0x8b, 0x62, - 0xbb, 0x00, 0x37, 0x49, 0x10, 0x1e, 0xfc, 0x06, 0x2c, 0x84, 0xa1, 0x3c, 0x99, 0x61, 0x22, 0x6f, - 0xc2, 0x49, 0x34, 0x1e, 0xa6, 0x37, 0x5f, 0x87, 0xa2, 0xc8, 0x42, 0x3c, 0x85, 0xc7, 0xe5, 0x82, - 0x71, 0x9f, 0x72, 0x21, 0xac, 0x01, 0x32, 0x93, 0x6a, 0x00, 0xcd, 0x5c, 0x17, 0x2a, 0x92, 0x37, - 0x2c, 0x90, 0x52, 0x69, 0x78, 0x12, 0x0a, 0xa1, 0x99, 0x4a, 0x4b, 0x54, 0x18, 0x87, 0x82, 0x70, - 0x44, 0xa1, 0x69, 0x3b, 0x82, 0x44, 0x46, 0x4d, 0xa7, 0x4c, 0xab, 0x7e, 0x32, 0xf7, 0xaf, 0x7e, - 0x34, 0x4d, 0x3f, 0x84, 0xea, 0xa4, 0x6a, 0xfa, 0x01, 0x72, 0x7e, 0x7a, 0x53, 0xcc, 0x77, 0x0c, - 0x58, 0xd4, 0x25, 0xa5, 0x3f, 0xbe, 0xf4, 0x4a, 0xce, 0xaf, 0xf6, 0xb4, 0x1d, 0xf9, 0x95, 0x01, - 0xcb, 0x89, 0xa5, 0x4d, 0x75, 0xe2, 0x53, 0x18, 0xa5, 0x3b, 0x47, 0x76, 0x0a, 0xe7, 0xf8, 0x6b, - 0x06, 0x2a, 0xb7, 0xac, 0x03, 0xe2, 0xee, 0x13, 0x97, 0xb4, 0x02, 0x9f, 0xa2, 0x1f, 0x40, 0xa9, - 0x63, 0x05, 0xad, 0x23, 0x01, 0x0d, 0x3b, 0x83, 0x66, 0xba, 0x60, 0x97, 0x90, 0x54, 0xdf, 0x89, - 0xc5, 0xdc, 0xf0, 0x02, 0xda, 0x6f, 0x5c, 0x50, 0x26, 0x95, 0x34, 0x0c, 0xd6, 0xb5, 0x89, 0x76, - 0x4e, 0x7c, 0xdf, 0x78, 0xab, 0xcb, 0xcb, 0x96, 0xe9, 0xbb, 0xc8, 0x84, 0x09, 0x98, 0xbc, 0xd9, - 0x73, 0x28, 0xe9, 0x10, 0x2f, 0x88, 0xdb, 0xb9, 0x9d, 0x21, 0xf9, 0x78, 0x44, 0xe3, 0xca, 0x8b, - 0xb0, 0x38, 0x6c, 0x3c, 0x8f, 0x3f, 0xc7, 0xa4, 0x2f, 0xcf, 0x0b, 0xf3, 0x9f, 0x68, 0x19, 0xf2, - 0x27, 0x96, 0xdb, 0x53, 0xb7, 0x11, 0xcb, 0x8f, 0xeb, 0x99, 0x6b, 0x86, 0xf9, 0x1b, 0x03, 0xaa, - 0x93, 0x0c, 0x41, 0x5f, 
0xd4, 0x04, 0x35, 0x4a, 0xca, 0xaa, 0xec, 0x2b, 0xa4, 0x2f, 0xa5, 0xde, - 0x80, 0x82, 0xdf, 0xe5, 0x35, 0x85, 0x4f, 0xd5, 0xa9, 0x3f, 0x11, 0x9e, 0xe4, 0xae, 0x82, 0x9f, - 0x0d, 0x6a, 0x17, 0x13, 0xe2, 0x43, 0x04, 0x8e, 0x58, 0x79, 0xa4, 0x16, 0xf6, 0xf0, 0xec, 0x11, - 0x45, 0xea, 0xbb, 0x02, 0x82, 0x15, 0xc6, 0xfc, 0x83, 0x01, 0x39, 0x51, 0x90, 0xbf, 0x0e, 0x05, - 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0xad, 0x20, 0xe7, 0xde, 0x21, 0x81, 0x15, 0x7b, - 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xef, 0x04, 0xa4, 0x13, 0x1e, 0xe4, 0x53, 0x13, 0x45, - 0xab, 0x87, 0x88, 0x3a, 0xb6, 0xee, 0xdd, 0x78, 0x2b, 0x20, 0x1e, 0x3f, 0x8c, 0xf8, 0x6a, 0x6c, - 0x73, 0x19, 0x58, 0x8a, 0x32, 0xff, 0x6d, 0x40, 0xa4, 0x8a, 0x3b, 0x3f, 0x23, 0xee, 0xe1, 0x2d, - 0xc7, 0x3b, 0x56, 0xdb, 0x1a, 0x99, 0xb3, 0xaf, 0xe0, 0x38, 0xa2, 0x18, 0x97, 0x1e, 0x32, 0xd3, - 0xa5, 0x07, 0xae, 0xb0, 0xe5, 0x7b, 0x81, 0xe3, 0xf5, 0x46, 0x6e, 0xdb, 0xa6, 0x82, 0xe3, 0x88, - 0x82, 0x17, 0x22, 0x94, 0x74, 0x2c, 0xc7, 0x73, 0xbc, 0x36, 0x5f, 0xc4, 0xa6, 0xdf, 0xf3, 0x02, - 0x91, 0x91, 0x55, 0x21, 0x82, 0x47, 0xb0, 0x78, 0x0c, 0x87, 0xf9, 0xfb, 0x1c, 0x94, 0xf8, 0x9a, - 0xc3, 0x3c, 0xf7, 0x3c, 0x54, 0x5c, 0xdd, 0x0b, 0xd4, 0xda, 0x2f, 0x2a, 0x53, 0x92, 0xf7, 0x1a, - 0x27, 0x69, 0x39, 0xb3, 0x28, 0xa1, 0x22, 0xe6, 0x4c, 0x92, 0x79, 0x4b, 0x47, 0xe2, 0x24, 0x2d, - 0x8f, 0x5e, 0xf7, 0xf8, 0xfd, 0x50, 0x95, 0x49, 0x74, 0x44, 0xdf, 0xe4, 0x40, 0x2c, 0x71, 0x68, - 0x07, 0x2e, 0x58, 0xae, 0xeb, 0xdf, 0x13, 0xc0, 0x86, 0xef, 0x1f, 0x77, 0x2c, 0x7a, 0xcc, 0x44, - 0x33, 0x5d, 0x68, 0x7c, 0x41, 0xb1, 0x5c, 0xd8, 0x18, 0x25, 0xc1, 0xe3, 0xf8, 0xc6, 0x1d, 0x5b, - 0x6e, 0xca, 0x63, 0x3b, 0x82, 0xe5, 0x21, 0x90, 0xb8, 0xe5, 0xaa, 0xb3, 0x7d, 0x46, 0xc9, 0x59, - 0xc6, 0x63, 0x68, 0xce, 0x26, 0xc0, 0xf1, 0x58, 0x89, 0xe8, 0x3a, 0xcc, 0x73, 0x4f, 0xf6, 0x7b, - 0x41, 0x58, 0x77, 0xe6, 0xc5, 0x71, 0xa3, 0xd3, 0x41, 0x6d, 0xfe, 0x76, 0x02, 0x83, 0x87, 0x28, - 0xf9, 0xe6, 0xba, 0x4e, 0xc7, 0x09, 0xaa, 0x73, 0x82, 0x25, 0xda, 0xdc, 0x5b, 0x1c, 0x88, 0x25, - 0x2e, 0xe1, 0x81, 0x85, 0xf3, 0x3c, 0xd0, 0xfc, 0x4b, 0x16, 0x90, 0xac, 0xb5, 0x6d, 0x59, 0x4f, - 0xc9, 0x90, 0xc6, 0x3b, 0x02, 0x55, 0xab, 0x1b, 0x43, 0x1d, 0x81, 0x2a, 0xd3, 0x43, 0x3c, 0xda, - 0x81, 0xa2, 0x0c, 0x2d, 0xf1, 0x75, 0x59, 0x57, 0xc4, 0xc5, 0xdd, 0x10, 0x71, 0x36, 0xa8, 0xad, - 0x24, 0xd4, 0x44, 0x18, 0xd1, 0xad, 0xc5, 0x12, 0xd0, 0x55, 0x00, 0xab, 0xeb, 0xe8, 0xef, 0x75, - 0xc5, 0xf8, 0xd5, 0x26, 0xee, 0xbc, 0xb1, 0x46, 0x85, 0x5e, 0x82, 0x5c, 0xf0, 0xe9, 0x3a, 0xaa, - 0x82, 0x68, 0x18, 0x79, 0xff, 0x24, 0x24, 0x70, 0xed, 0xc2, 0x9f, 0x19, 0x37, 0x4b, 0x35, 0x43, - 0x91, 0xf6, 0xad, 0x08, 0x83, 0x35, 0x2a, 0xf4, 0x2d, 0x28, 0x1c, 0xaa, 0x52, 0x54, 0x1c, 0x4c, - 0xea, 0x10, 0x19, 0x16, 0xb0, 0xf2, 0xc9, 0x20, 0xfc, 0xc2, 0x91, 0x34, 0xf4, 0x55, 0x28, 0xb1, - 0xde, 0x41, 0x94, 0xbd, 0xe5, 0x69, 0x46, 0xa9, 0x72, 0x3f, 0x46, 0x61, 0x9d, 0xce, 0x7c, 0x13, - 0x8a, 0x3b, 0x4e, 0x8b, 0xfa, 0xa2, 0x07, 0x7c, 0x02, 0xe6, 0x58, 0xa2, 0xc1, 0x89, 0x4e, 0x32, - 0xf4, 0xb2, 0x10, 0xcf, 0xdd, 0xcb, 0xb3, 0x3c, 0x5f, 0xb6, 0x31, 0xf9, 0xd8, 0xbd, 0x5e, 0xe5, - 0x40, 0x2c, 0x71, 0xd7, 0x97, 0x79, 0x81, 0xf0, 0xd3, 0xf7, 0x6b, 0x33, 0xef, 0xbe, 0x5f, 0x9b, - 0x79, 0xef, 0x7d, 0x55, 0x2c, 0xfc, 0x11, 0x00, 0x76, 0x0f, 0xbe, 0x47, 0x5a, 0x32, 0xec, 0xa6, - 0x7a, 0xd6, 0x0b, 0x5f, 0x93, 0xc5, 0xb3, 0x5e, 0x66, 0xa8, 0xe8, 0xd3, 0x70, 0x38, 0x41, 0x89, - 0xd6, 0xa1, 0x18, 0x3d, 0xd8, 0x29, 0xff, 0x58, 0x0a, 0xfd, 0x2d, 0x7a, 0xd5, 0xc3, 0x31, 0x4d, - 0x22, 0x07, 0xe4, 0xce, 0xcd, 0x01, 0x0d, 0xc8, 
0xf6, 0x1c, 0x5b, 0x35, 0xcc, 0x4f, 0x87, 0x39, - 0xf8, 0xce, 0x76, 0xf3, 0x6c, 0x50, 0x7b, 0x64, 0xd2, 0x3b, 0x79, 0xd0, 0xef, 0x12, 0x56, 0xbf, - 0xb3, 0xdd, 0xc4, 0x9c, 0x79, 0x5c, 0x40, 0x9a, 0x9d, 0x32, 0x20, 0x5d, 0x05, 0x68, 0xc7, 0xcf, - 0x0e, 0xf2, 0xbe, 0x47, 0x8e, 0xa8, 0x3d, 0x37, 0x68, 0x54, 0x88, 0xc1, 0x52, 0x8b, 0xb7, 0xe6, - 0xaa, 0xfd, 0x67, 0x81, 0xd5, 0x91, 0x0f, 0x99, 0xd3, 0xdd, 0x89, 0x4b, 0x4a, 0xcd, 0xd2, 0xe6, - 0xb0, 0x30, 0x3c, 0x2a, 0x1f, 0xf9, 0xb0, 0x64, 0xab, 0x0e, 0x31, 0x56, 0x5a, 0x9c, 0x5a, 0xe9, - 0x45, 0xae, 0xb0, 0x39, 0x2c, 0x08, 0x8f, 0xca, 0x46, 0xdf, 0x85, 0x95, 0x10, 0x38, 0xda, 0xa6, - 0x8b, 0x80, 0x9d, 0x6d, 0xac, 0x9e, 0x0e, 0x6a, 0x2b, 0xcd, 0x89, 0x54, 0xf8, 0x3e, 0x12, 0x90, - 0x0d, 0xb3, 0xae, 0x2c, 0x70, 0x4b, 0xa2, 0x28, 0xf9, 0x5a, 0xba, 0x55, 0xc4, 0xde, 0x5f, 0xd7, - 0x0b, 0xdb, 0xe8, 0xc9, 0x45, 0xd5, 0xb4, 0x4a, 0x36, 0x7a, 0x0b, 0x4a, 0x96, 0xe7, 0xf9, 0x81, - 0x25, 0x1f, 0x0e, 0xca, 0x42, 0xd5, 0xc6, 0xd4, 0xaa, 0x36, 0x62, 0x19, 0x43, 0x85, 0xb4, 0x86, - 0xc1, 0xba, 0x2a, 0x74, 0x0f, 0x16, 0xfc, 0x7b, 0x1e, 0xa1, 0x98, 0x1c, 0x12, 0x4a, 0xbc, 0x16, - 0x61, 0xd5, 0x8a, 0xd0, 0xfe, 0x4c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x87, - 0xb5, 0xa0, 0x3a, 0x8f, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe7, 0xe3, 0xb7, 0xe6, - 0xad, 0x08, 0x8a, 0x35, 0x0a, 0xd4, 0x83, 0x4a, 0x47, 0x4f, 0x19, 0xd5, 0x25, 0x61, 0xe6, 0xb5, - 0x74, 0x66, 0x8e, 0x26, 0xb5, 0xb8, 0x82, 0x49, 0xe0, 0x70, 0x52, 0xcb, 0xca, 0x73, 0x50, 0xfa, - 0x94, 0xc5, 0x3d, 0x6f, 0x0e, 0x86, 0x0f, 0x64, 0xaa, 0xe6, 0xe0, 0x4f, 0x19, 0x98, 0x4f, 0x6e, - 0xe3, 0x50, 0x3a, 0xcc, 0xa7, 0x4a, 0x87, 0x61, 0x1b, 0x6a, 0x4c, 0x1c, 0x3a, 0x84, 0xf1, 0x39, - 0x3b, 0x31, 0x3e, 0xab, 0x30, 0x98, 0x7b, 0x90, 0x30, 0x58, 0x07, 0xe0, 0x75, 0x06, 0xf5, 0x5d, - 0x97, 0x50, 0x11, 0x01, 0x0b, 0x6a, 0xb8, 0x10, 0x41, 0xb1, 0x46, 0xc1, 0xab, 0xe1, 0x03, 0xd7, - 0x6f, 0x1d, 0x8b, 0x2d, 0x08, 0x6f, 0xaf, 0x88, 0x7d, 0x05, 0x59, 0x0d, 0x37, 0x46, 0xb0, 0x78, - 0x0c, 0x87, 0xd9, 0x87, 0x8b, 0x7b, 0x16, 0x0d, 0x1c, 0xcb, 0x8d, 0x6f, 0x8a, 0x68, 0x37, 0xde, - 0x18, 0x69, 0x66, 0x9e, 0x9e, 0xf6, 0xc6, 0xc5, 0x9b, 0x1f, 0xc3, 0xe2, 0x86, 0xc6, 0xfc, 0x9b, - 0x01, 0x97, 0xc6, 0xea, 0xfe, 0x0c, 0x9a, 0xa9, 0x37, 0x92, 0xcd, 0xd4, 0xf3, 0x29, 0x5f, 0x21, - 0xc7, 0x59, 0x3b, 0xa1, 0xb5, 0x9a, 0x83, 0xfc, 0x1e, 0x2f, 0x62, 0xcd, 0x0f, 0x0d, 0x28, 0x8b, - 0x5f, 0xd3, 0x3c, 0x02, 0xd7, 0x92, 0xb3, 0x81, 0xe2, 0xc3, 0x9b, 0x0b, 0x3c, 0x8c, 0x57, 0xe2, - 0x77, 0x0c, 0x48, 0x3e, 0xbf, 0xa2, 0x17, 0xe5, 0x15, 0x30, 0xa2, 0xf7, 0xd1, 0x29, 0xdd, 0xff, - 0x85, 0x49, 0xdd, 0xe4, 0x85, 0x54, 0x0f, 0x8d, 0x4f, 0x42, 0x11, 0xfb, 0x7e, 0xb0, 0x67, 0x05, - 0x47, 0x8c, 0xef, 0x5d, 0x97, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, - 0x37, 0xe0, 0xd2, 0xc4, 0x91, 0x0f, 0x8f, 0x22, 0xad, 0xe8, 0x4b, 0xad, 0x28, 0x72, 0xe4, 0x98, - 0x0e, 0x6b, 0x54, 0xbc, 0x0d, 0x4c, 0xcc, 0x89, 0x86, 0xdb, 0xc0, 0x84, 0x36, 0x9c, 0xa4, 0x35, - 0xff, 0x95, 0x01, 0x35, 0x63, 0xf9, 0x1f, 0x3b, 0xfd, 0xe3, 0x43, 0x13, 0x9e, 0xf9, 0xe4, 0x84, - 0x27, 0x1a, 0xe7, 0x68, 0x23, 0x8e, 0xec, 0xfd, 0x47, 0x1c, 0xe8, 0xd9, 0x68, 0x6a, 0x22, 0x7d, - 0x68, 0x35, 0x39, 0x35, 0x39, 0x1b, 0xd4, 0xca, 0x4a, 0x78, 0x72, 0x8a, 0xf2, 0x1a, 0xcc, 0xd9, - 0x24, 0xb0, 0x1c, 0x57, 0xb6, 0x74, 0xa9, 0xe7, 0x00, 0x52, 0x58, 0x53, 0xb2, 0x36, 0x4a, 0xdc, - 0x26, 0xf5, 0x81, 0x43, 0x81, 0x3c, 0x60, 0xb7, 0x7c, 0x5b, 0x76, 0x24, 0xf9, 0x38, 0x60, 0x6f, - 0xfa, 0x36, 0xc1, 0x02, 0x63, 0xbe, 0x6b, 0x40, 0x49, 0x4a, 0xda, 0xb4, 
0x7a, 0x8c, 0xa0, 0x2b, - 0xd1, 0x2a, 0xe4, 0x71, 0x5f, 0xd2, 0xc7, 0x63, 0x67, 0x83, 0x5a, 0x51, 0x90, 0x89, 0x66, 0x66, - 0xcc, 0x18, 0x28, 0x73, 0xce, 0x1e, 0x3d, 0x0a, 0x79, 0x71, 0x81, 0xd4, 0x66, 0xc6, 0x73, 0x3e, - 0x0e, 0xc4, 0x12, 0x67, 0x7e, 0x9c, 0x81, 0x4a, 0x62, 0x71, 0x29, 0xfa, 0x82, 0xe8, 0xf5, 0x33, - 0x93, 0xe2, 0x45, 0x7d, 0xf2, 0x54, 0x5d, 0xa5, 0xaf, 0xd9, 0x07, 0x49, 0x5f, 0xdf, 0x86, 0xd9, - 0x16, 0xdf, 0xa3, 0xf0, 0x4f, 0x1a, 0x57, 0xa6, 0x39, 0x4e, 0xb1, 0xbb, 0xb1, 0x37, 0x8a, 0x4f, - 0x86, 0x95, 0x40, 0x74, 0x13, 0x96, 0x28, 0x09, 0x68, 0x7f, 0xe3, 0x30, 0x20, 0x54, 0x7f, 0x07, - 0xc8, 0xc7, 0xd5, 0x37, 0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0xe6, 0x01, 0x94, 0x6f, 0x5b, 0x07, 0x6e, - 0x34, 0xd9, 0xc2, 0x50, 0x71, 0xbc, 0x96, 0xdb, 0xb3, 0x89, 0x0c, 0xe8, 0x61, 0xf4, 0x0a, 0x2f, - 0xed, 0xb6, 0x8e, 0x3c, 0x1b, 0xd4, 0x2e, 0x24, 0x00, 0x72, 0x94, 0x83, 0x93, 0x22, 0x4c, 0x17, - 0x72, 0x9f, 0x61, 0x27, 0xf9, 0x1d, 0x28, 0xc6, 0xb5, 0xfe, 0x43, 0x56, 0x69, 0xbe, 0x01, 0x05, - 0xee, 0xf1, 0x61, 0x8f, 0x7a, 0x4e, 0x95, 0x94, 0xac, 0xbd, 0x32, 0x69, 0x6a, 0x2f, 0x31, 0x1f, - 0xbd, 0xd3, 0xb5, 0x1f, 0x70, 0x3e, 0x9a, 0x79, 0x90, 0xcc, 0x97, 0x9d, 0x32, 0xf3, 0x5d, 0x05, - 0xf9, 0x1f, 0x12, 0x9e, 0x64, 0x64, 0x01, 0xa1, 0x25, 0x19, 0x3d, 0xff, 0x6b, 0xc3, 0x81, 0x1f, - 0x1b, 0x00, 0xe2, 0x15, 0xee, 0xc6, 0x09, 0xf1, 0x82, 0x14, 0x93, 0xf8, 0x3b, 0x30, 0xeb, 0x4b, - 0x8f, 0x94, 0x33, 0xd2, 0x29, 0x9f, 0x7a, 0xa3, 0x8b, 0x24, 0x7d, 0x12, 0x2b, 0x61, 0x8d, 0x97, - 0x3f, 0xf8, 0x64, 0x75, 0xe6, 0xc3, 0x4f, 0x56, 0x67, 0x3e, 0xfa, 0x64, 0x75, 0xe6, 0xed, 0xd3, - 0x55, 0xe3, 0x83, 0xd3, 0x55, 0xe3, 0xc3, 0xd3, 0x55, 0xe3, 0xa3, 0xd3, 0x55, 0xe3, 0xe3, 0xd3, - 0x55, 0xe3, 0xdd, 0x7f, 0xac, 0xce, 0xbc, 0xf6, 0x58, 0x9a, 0xff, 0xe6, 0xfd, 0x37, 0x00, 0x00, - 0xff, 0xff, 0x0b, 0x4d, 0x51, 0xc5, 0xdb, 0x27, 0x00, 0x00, + 0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85, + 0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa, + 0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0xec, 0x1c, 0x5f, 0x63, 0x75, 0xc7, 0x5f, 0x3f, 0xee, 0x1d, 0x10, + 0xea, 0x91, 0x80, 0xb0, 0xf5, 0x13, 0xe2, 0xd9, 0x3e, 0x5d, 0x57, 0x08, 0xab, 0xeb, 0x74, 0xac, + 0xd6, 0x91, 0xe3, 0x11, 0xda, 0x5f, 0xef, 0x1e, 0xb7, 0x39, 0x80, 0xad, 0x77, 0x48, 0x60, 0xad, + 0x9f, 0x5c, 0x59, 0x6f, 0x13, 0x8f, 0x50, 0x2b, 0x20, 0x76, 0xbd, 0x4b, 0xfd, 0xc0, 0x47, 0x8f, + 0x49, 0xae, 0xba, 0xce, 0x55, 0xef, 0x1e, 0xb7, 0x39, 0x80, 0xd5, 0x39, 0x57, 0xfd, 0xe4, 0xca, + 0xca, 0x53, 0x6d, 0x27, 0x38, 0xea, 0x1d, 0xd4, 0x5b, 0x7e, 0x67, 0xbd, 0xed, 0xb7, 0xfd, 0x75, + 0xc1, 0x7c, 0xd0, 0x3b, 0x14, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x57, 0x26, 0x9a, 0x42, 0x7b, + 0x5e, 0xe0, 0x74, 0xc8, 0xb0, 0x15, 0x2b, 0xcf, 0x9e, 0xc7, 0xc0, 0x5a, 0x47, 0xa4, 0x63, 0x0d, + 0xf3, 0x99, 0x7f, 0xca, 0x42, 0x61, 0x63, 0x6f, 0xfb, 0x26, 0xf5, 0x7b, 0x5d, 0xb4, 0x06, 0x39, + 0xcf, 0xea, 0x90, 0xaa, 0xb1, 0x66, 0x5c, 0x2e, 0x36, 0xca, 0x1f, 0x0c, 0x6a, 0x33, 0xa7, 0x83, + 0x5a, 0xee, 0x55, 0xab, 0x43, 0xb0, 0xc0, 0x20, 0x17, 0x0a, 0x27, 0x84, 0x32, 0xc7, 0xf7, 0x58, + 0x35, 0xb3, 0x96, 0xbd, 0x5c, 0xba, 0xfa, 0x62, 0x3d, 0xcd, 0xfa, 0xeb, 0x42, 0xc1, 0x5d, 0xc9, + 0xba, 0xe5, 0xd3, 0xa6, 0xc3, 0x5a, 0xfe, 0x09, 0xa1, 0xfd, 0xc6, 0xa2, 0xd2, 0x52, 0x50, 0x48, + 0x86, 0x23, 0x0d, 0xe8, 0x47, 0x06, 0x2c, 0x76, 0x29, 0x39, 0x24, 0x94, 0x12, 0x5b, 0xe1, 0xab, + 0xd9, 0x35, 0xe3, 0x21, 0xa8, 0xad, 0x2a, 0xb5, 0x8b, 0x7b, 0x43, 0xf2, 0xf1, 0x88, 0x46, 0xf4, + 0x6b, 0x03, 0x56, 0x18, 0xa1, 0x27, 
0x84, 0x6e, 0xd8, 0x36, 0x25, 0x8c, 0x35, 0xfa, 0x9b, 0xae, + 0x43, 0xbc, 0x60, 0x73, 0xbb, 0x89, 0x59, 0x35, 0x27, 0xf6, 0xe1, 0xeb, 0xe9, 0x0c, 0xda, 0x9f, + 0x24, 0xa7, 0x61, 0x2a, 0x8b, 0x56, 0x26, 0x92, 0x30, 0x7c, 0x1f, 0x33, 0xcc, 0x43, 0x28, 0x87, + 0x07, 0x79, 0xcb, 0x61, 0x01, 0xba, 0x0b, 0xb3, 0x6d, 0xfe, 0xc1, 0xaa, 0x86, 0x30, 0xb0, 0x9e, + 0xce, 0xc0, 0x50, 0x46, 0x63, 0x5e, 0xd9, 0x33, 0x2b, 0x3e, 0x19, 0x56, 0xd2, 0xcc, 0x9f, 0xe5, + 0xa0, 0xb4, 0xb1, 0xb7, 0x8d, 0x09, 0xf3, 0x7b, 0xb4, 0x45, 0x52, 0x38, 0xcd, 0x35, 0x28, 0x33, + 0xc7, 0x6b, 0xf7, 0x5c, 0x8b, 0x72, 0x68, 0x75, 0x56, 0x50, 0x2e, 0x2b, 0xca, 0xf2, 0xbe, 0x86, + 0xc3, 0x09, 0x4a, 0x74, 0x15, 0x80, 0x4b, 0x60, 0x5d, 0xab, 0x45, 0xec, 0x6a, 0x66, 0xcd, 0xb8, + 0x5c, 0x68, 0x20, 0xc5, 0x07, 0xaf, 0x46, 0x18, 0xac, 0x51, 0xa1, 0x47, 0x21, 0x2f, 0x2c, 0xad, + 0x16, 0x84, 0x9a, 0x8a, 0x22, 0xcf, 0x8b, 0x65, 0x60, 0x89, 0x43, 0x4f, 0xc0, 0x9c, 0xf2, 0xb2, + 0x6a, 0x51, 0x90, 0x2d, 0x28, 0xb2, 0xb9, 0xd0, 0x0d, 0x42, 0x3c, 0x5f, 0xdf, 0xb1, 0xe3, 0xd9, + 0xc2, 0xef, 0xb4, 0xf5, 0xbd, 0xe2, 0x78, 0x36, 0x16, 0x18, 0x74, 0x0b, 0xf2, 0x27, 0x84, 0x1e, + 0x70, 0x4f, 0xe0, 0xae, 0xf9, 0xe5, 0x74, 0x1b, 0x7d, 0x97, 0xb3, 0x34, 0x8a, 0xdc, 0x34, 0xf1, + 0x13, 0x4b, 0x21, 0xa8, 0x0e, 0xc0, 0x8e, 0x7c, 0x1a, 0x88, 0xe5, 0x55, 0xf3, 0x6b, 0xd9, 0xcb, + 0xc5, 0xc6, 0x3c, 0x5f, 0xef, 0x7e, 0x04, 0xc5, 0x1a, 0x05, 0xa7, 0x6f, 0x59, 0x01, 0x69, 0xfb, + 0xd4, 0x21, 0xac, 0x3a, 0x17, 0xd3, 0x6f, 0x46, 0x50, 0xac, 0x51, 0xa0, 0x97, 0x01, 0xb1, 0xc0, + 0xa7, 0x56, 0x9b, 0xa8, 0xa5, 0xbe, 0x64, 0xb1, 0xa3, 0x2a, 0x88, 0xd5, 0xad, 0xa8, 0xd5, 0xa1, + 0xfd, 0x11, 0x0a, 0x3c, 0x86, 0xcb, 0xfc, 0x9d, 0x01, 0x0b, 0x9a, 0x2f, 0x08, 0xbf, 0xbb, 0x06, + 0xe5, 0xb6, 0x76, 0xeb, 0x94, 0x5f, 0x44, 0xa7, 0xad, 0xdf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, + 0x54, 0x49, 0x0a, 0xa3, 0xcb, 0x95, 0xd4, 0x4e, 0x1b, 0xda, 0x10, 0x6b, 0xd2, 0x80, 0x0c, 0xc7, + 0x92, 0xcd, 0x7f, 0x18, 0xc2, 0x81, 0xc3, 0x78, 0x83, 0x2e, 0x6b, 0x31, 0xcd, 0x10, 0xdb, 0x57, + 0x9e, 0x10, 0x8f, 0xce, 0x09, 0x04, 0x99, 0xff, 0x8b, 0x40, 0x70, 0xbd, 0xf0, 0xcb, 0xf7, 0x6a, + 0x33, 0x6f, 0xff, 0x6d, 0x6d, 0xc6, 0xfc, 0x85, 0x01, 0xe5, 0x8d, 0x6e, 0xd7, 0xed, 0xef, 0x76, + 0x03, 0xb1, 0x00, 0x13, 0x66, 0x6d, 0xda, 0xc7, 0x3d, 0x4f, 0x2d, 0x14, 0xf8, 0xfd, 0x6e, 0x0a, + 0x08, 0x56, 0x18, 0x7e, 0x7f, 0x0e, 0x7d, 0xda, 0x22, 0xea, 0xba, 0x45, 0xf7, 0x67, 0x8b, 0x03, + 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x74, 0x88, 0x6b, 0xef, 0x58, 0x9e, 0xd5, 0x26, 0x54, 0x5d, 0x8e, + 0x68, 0xeb, 0xb7, 0x34, 0x1c, 0x4e, 0x50, 0x9a, 0xff, 0xc9, 0x40, 0x71, 0xd3, 0xf7, 0x6c, 0x27, + 0x50, 0x97, 0x2b, 0xe8, 0x77, 0x47, 0x82, 0xc7, 0xed, 0x7e, 0x97, 0x60, 0x81, 0x41, 0xcf, 0xc1, + 0x2c, 0x0b, 0xac, 0xa0, 0xc7, 0x84, 0x3d, 0xc5, 0xc6, 0x23, 0x61, 0x58, 0xda, 0x17, 0xd0, 0xb3, + 0x41, 0x6d, 0x21, 0x12, 0x27, 0x41, 0x58, 0x31, 0x70, 0x4f, 0xf7, 0x0f, 0xc4, 0x46, 0xd9, 0x37, + 0xe5, 0xb3, 0x17, 0xbe, 0x1f, 0xd9, 0xd8, 0xd3, 0x77, 0x47, 0x28, 0xf0, 0x18, 0x2e, 0x74, 0x02, + 0xc8, 0xb5, 0x58, 0x70, 0x9b, 0x5a, 0x1e, 0x13, 0xba, 0x6e, 0x3b, 0x1d, 0xa2, 0x2e, 0xfc, 0x97, + 0xd2, 0x9d, 0x38, 0xe7, 0x88, 0xf5, 0xde, 0x1a, 0x91, 0x86, 0xc7, 0x68, 0x40, 0x8f, 0xc3, 0x2c, + 0x25, 0x16, 0xf3, 0xbd, 0x6a, 0x5e, 0x2c, 0x3f, 0x8a, 0xca, 0x58, 0x40, 0xb1, 0xc2, 0xf2, 0x80, + 0xd6, 0x21, 0x8c, 0x59, 0xed, 0x30, 0xbc, 0x46, 0x01, 0x6d, 0x47, 0x82, 0x71, 0x88, 0x37, 0x7f, + 0x6b, 0x40, 0x65, 0x93, 0x12, 0x2b, 0x20, 0xd3, 0xb8, 0xc5, 0xa7, 0x3e, 0x71, 0xb4, 0x01, 0x0b, + 0xe2, 0xfb, 0xae, 0xe5, 0x3a, 0xb6, 0x3c, 0x83, 0x9c, 0x60, 
0xfe, 0xbc, 0x62, 0x5e, 0xd8, 0x4a, + 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x93, 0x2c, 0x54, 0x9a, 0xc4, 0x25, 0xb1, 0xc9, 0x5b, 0x80, 0xda, + 0xd4, 0x6a, 0x91, 0x3d, 0x42, 0x1d, 0xdf, 0xde, 0x27, 0x2d, 0xdf, 0xb3, 0x99, 0x70, 0xa3, 0x6c, + 0xe3, 0x73, 0x7c, 0x7f, 0x6f, 0x8e, 0x60, 0xf1, 0x18, 0x0e, 0xe4, 0x42, 0xa5, 0x4b, 0xc5, 0x6f, + 0xb1, 0xe7, 0xd2, 0xcb, 0x4a, 0x57, 0xbf, 0x92, 0xee, 0x48, 0xf7, 0x74, 0xd6, 0xc6, 0xd2, 0xe9, + 0xa0, 0x56, 0x49, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x06, 0x2c, 0xfa, 0xb4, 0x7b, 0x64, 0x79, 0x4d, + 0xd2, 0x25, 0x9e, 0x4d, 0xbc, 0x80, 0x89, 0x8d, 0x2c, 0x34, 0x96, 0x79, 0x2e, 0xb2, 0x3b, 0x84, + 0xc3, 0x23, 0xd4, 0xe8, 0x35, 0x58, 0xea, 0x52, 0xbf, 0x6b, 0xb5, 0xc5, 0xc6, 0xec, 0xf9, 0xae, + 0xd3, 0xea, 0xab, 0xed, 0x7c, 0xf2, 0x74, 0x50, 0x5b, 0xda, 0x1b, 0x46, 0x9e, 0x0d, 0x6a, 0x17, + 0xc4, 0xd6, 0x71, 0x48, 0x8c, 0xc4, 0xa3, 0x62, 0x34, 0x37, 0xc8, 0x4f, 0x72, 0x03, 0x73, 0x1b, + 0x0a, 0xcd, 0x9e, 0xba, 0x13, 0x2f, 0x40, 0xc1, 0x56, 0xbf, 0xd5, 0xce, 0x87, 0x97, 0x33, 0xa2, + 0x39, 0x1b, 0xd4, 0x2a, 0x3c, 0xfd, 0xac, 0x87, 0x00, 0x1c, 0xb1, 0x98, 0x8f, 0x43, 0x41, 0x1c, + 0x3c, 0xbb, 0x7b, 0x05, 0x2d, 0x42, 0x16, 0x5b, 0xf7, 0x84, 0x94, 0x32, 0xe6, 0x3f, 0xb5, 0x28, + 0xb6, 0x0b, 0x70, 0x93, 0x04, 0xe1, 0xc1, 0x6f, 0xc0, 0x42, 0x18, 0xca, 0x93, 0x2f, 0x4c, 0xe4, + 0x4d, 0x38, 0x89, 0xc6, 0xc3, 0xf4, 0xe6, 0xeb, 0x50, 0x14, 0xaf, 0x10, 0x7f, 0xc2, 0xe3, 0x74, + 0xc1, 0xb8, 0x4f, 0xba, 0x10, 0xe6, 0x00, 0x99, 0x49, 0x39, 0x80, 0x66, 0xae, 0x0b, 0x15, 0xc9, + 0x1b, 0x26, 0x48, 0xa9, 0x34, 0x3c, 0x09, 0x85, 0xd0, 0x4c, 0xa5, 0x25, 0x4a, 0x8c, 0x43, 0x41, + 0x38, 0xa2, 0xd0, 0xb4, 0x1d, 0x41, 0xe2, 0x45, 0x4d, 0xa7, 0x4c, 0xcb, 0x7e, 0x32, 0xf7, 0xcf, + 0x7e, 0x34, 0x4d, 0x3f, 0x84, 0xea, 0xa4, 0x6c, 0xfa, 0x01, 0xde, 0xfc, 0xf4, 0xa6, 0x98, 0xef, + 0x18, 0xb0, 0xa8, 0x4b, 0x4a, 0x7f, 0x7c, 0xe9, 0x95, 0x9c, 0x9f, 0xed, 0x69, 0x3b, 0xf2, 0x2b, + 0x03, 0x96, 0x13, 0x4b, 0x9b, 0xea, 0xc4, 0xa7, 0x30, 0x4a, 0x77, 0x8e, 0xec, 0x14, 0xce, 0xf1, + 0x97, 0x0c, 0x54, 0x6e, 0x59, 0x07, 0xc4, 0xdd, 0x27, 0x2e, 0x69, 0x05, 0x3e, 0x45, 0x3f, 0x80, + 0x52, 0xc7, 0x0a, 0x5a, 0x47, 0x02, 0x1a, 0x56, 0x06, 0xcd, 0x74, 0xc1, 0x2e, 0x21, 0xa9, 0xbe, + 0x13, 0x8b, 0xb9, 0xe1, 0x05, 0xb4, 0xdf, 0xb8, 0xa0, 0x4c, 0x2a, 0x69, 0x18, 0xac, 0x6b, 0x13, + 0xe5, 0x9c, 0xf8, 0xbe, 0xf1, 0x56, 0x97, 0xa7, 0x2d, 0xd3, 0x57, 0x91, 0x09, 0x13, 0x30, 0x79, + 0xb3, 0xe7, 0x50, 0xd2, 0x21, 0x5e, 0x10, 0x97, 0x73, 0x3b, 0x43, 0xf2, 0xf1, 0x88, 0xc6, 0x95, + 0x17, 0x61, 0x71, 0xd8, 0x78, 0x1e, 0x7f, 0x8e, 0x49, 0x5f, 0x9e, 0x17, 0xe6, 0x3f, 0xd1, 0x32, + 0xe4, 0x4f, 0x2c, 0xb7, 0xa7, 0x6e, 0x23, 0x96, 0x1f, 0xd7, 0x33, 0xd7, 0x0c, 0xf3, 0x37, 0x06, + 0x54, 0x27, 0x19, 0x82, 0xbe, 0xa8, 0x09, 0x6a, 0x94, 0x94, 0x55, 0xd9, 0x57, 0x48, 0x5f, 0x4a, + 0xbd, 0x01, 0x05, 0xbf, 0xcb, 0x73, 0x0a, 0x9f, 0xaa, 0x53, 0x7f, 0x22, 0x3c, 0xc9, 0x5d, 0x05, + 0x3f, 0x1b, 0xd4, 0x2e, 0x26, 0xc4, 0x87, 0x08, 0x1c, 0xb1, 0xf2, 0x48, 0x2d, 0xec, 0xe1, 0xaf, + 0x47, 0x14, 0xa9, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0xf7, 0x06, 0xe4, 0x44, 0x42, 0xfe, 0x3a, + 0x14, 0xf8, 0xfe, 0xd9, 0x56, 0x60, 0x09, 0xbb, 0x52, 0x97, 0x82, 0x9c, 0x7b, 0x87, 0x04, 0x56, + 0xec, 0x6d, 0x21, 0x04, 0x47, 0x12, 0x11, 0x86, 0xbc, 0x13, 0x90, 0x4e, 0x78, 0x90, 0x4f, 0x4d, + 0x14, 0xad, 0x1a, 0x11, 0x75, 0x6c, 0xdd, 0xbb, 0xf1, 0x56, 0x40, 0x3c, 0x7e, 0x18, 0xf1, 0xd5, + 0xd8, 0xe6, 0x32, 0xb0, 0x14, 0x65, 0xfe, 0xcb, 0x80, 0x48, 0x15, 0x77, 0x7e, 0x46, 0xdc, 0xc3, + 0x5b, 0x8e, 0x77, 0xac, 0xb6, 0x35, 0x32, 0x67, 0x5f, 0xc1, 0x71, 0x44, 0x31, 0xee, 
0x79, 0xc8, + 0x4c, 0xf7, 0x3c, 0x70, 0x85, 0x2d, 0xdf, 0x0b, 0x1c, 0xaf, 0x37, 0x72, 0xdb, 0x36, 0x15, 0x1c, + 0x47, 0x14, 0x3c, 0x11, 0xa1, 0xa4, 0x63, 0x39, 0x9e, 0xe3, 0xb5, 0xf9, 0x22, 0x36, 0xfd, 0x9e, + 0x17, 0x88, 0x17, 0x59, 0x25, 0x22, 0x78, 0x04, 0x8b, 0xc7, 0x70, 0x98, 0xff, 0xce, 0x41, 0x89, + 0xaf, 0x39, 0x7c, 0xe7, 0x9e, 0x87, 0x8a, 0xab, 0x7b, 0x81, 0x5a, 0xfb, 0x45, 0x65, 0x4a, 0xf2, + 0x5e, 0xe3, 0x24, 0x2d, 0x67, 0x16, 0x29, 0x54, 0xc4, 0x9c, 0x49, 0x32, 0x6f, 0xe9, 0x48, 0x9c, + 0xa4, 0xe5, 0xd1, 0xeb, 0x1e, 0xbf, 0x1f, 0x2a, 0x33, 0x89, 0x8e, 0xe8, 0x9b, 0x1c, 0x88, 0x25, + 0x0e, 0xed, 0xc0, 0x05, 0xcb, 0x75, 0xfd, 0x7b, 0x02, 0xd8, 0xf0, 0xfd, 0xe3, 0x8e, 0x45, 0x8f, + 0x99, 0x28, 0xa6, 0x0b, 0x8d, 0x2f, 0x28, 0x96, 0x0b, 0x1b, 0xa3, 0x24, 0x78, 0x1c, 0xdf, 0xb8, + 0x63, 0xcb, 0x4d, 0x79, 0x6c, 0x47, 0xb0, 0x3c, 0x04, 0x12, 0xb7, 0x5c, 0x55, 0xb6, 0xcf, 0x28, + 0x39, 0xcb, 0x78, 0x0c, 0xcd, 0xd9, 0x04, 0x38, 0x1e, 0x2b, 0x11, 0x5d, 0x87, 0x79, 0xee, 0xc9, + 0x7e, 0x2f, 0x08, 0xf3, 0xce, 0xbc, 0x38, 0x6e, 0x74, 0x3a, 0xa8, 0xcd, 0xdf, 0x4e, 0x60, 0xf0, + 0x10, 0x25, 0xdf, 0x5c, 0xd7, 0xe9, 0x38, 0x41, 0x75, 0x4e, 0xb0, 0x44, 0x9b, 0x7b, 0x8b, 0x03, + 0xb1, 0xc4, 0x25, 0x3c, 0xb0, 0x70, 0xae, 0x07, 0x6e, 0xc2, 0x12, 0x23, 0x9e, 0xbd, 0xed, 0x39, + 0x81, 0x63, 0xb9, 0x37, 0x4e, 0x44, 0x56, 0x59, 0x12, 0x07, 0x71, 0x91, 0xa7, 0x84, 0xfb, 0xc3, + 0x48, 0x3c, 0x4a, 0x6f, 0xfe, 0x39, 0x0b, 0x48, 0x26, 0xec, 0xb6, 0x4c, 0xca, 0x64, 0x5c, 0xe4, + 0x65, 0x85, 0x4a, 0xf8, 0x8d, 0xa1, 0xb2, 0x42, 0xe5, 0xfa, 0x21, 0x1e, 0xed, 0x40, 0x51, 0xc6, + 0xa7, 0xf8, 0xce, 0xad, 0x2b, 0xe2, 0xe2, 0x6e, 0x88, 0x38, 0x1b, 0xd4, 0x56, 0x12, 0x6a, 0x22, + 0x8c, 0x28, 0xf9, 0x62, 0x09, 0xe8, 0x2a, 0x80, 0xd5, 0x75, 0xf4, 0xa6, 0x5f, 0x31, 0x6e, 0xfd, + 0xc4, 0xe5, 0x3b, 0xd6, 0xa8, 0xd0, 0x4b, 0x90, 0x0b, 0x3e, 0x5d, 0x59, 0x56, 0x10, 0x55, 0x27, + 0x2f, 0xc2, 0x84, 0x04, 0xae, 0x5d, 0x5c, 0x0a, 0xc6, 0xcd, 0x52, 0x15, 0x55, 0xa4, 0x7d, 0x2b, + 0xc2, 0x60, 0x8d, 0x0a, 0x7d, 0x0b, 0x0a, 0x87, 0x2a, 0x9f, 0x15, 0xa7, 0x9b, 0x3a, 0xce, 0x86, + 0x59, 0xb0, 0xec, 0x3b, 0x84, 0x5f, 0x38, 0x92, 0x86, 0xbe, 0x0a, 0x25, 0xd6, 0x3b, 0x88, 0x52, + 0x00, 0xe9, 0x12, 0xd1, 0x7b, 0xbb, 0x1f, 0xa3, 0xb0, 0x4e, 0x67, 0xbe, 0x09, 0xc5, 0x1d, 0xa7, + 0x45, 0x7d, 0x51, 0x48, 0x3e, 0x01, 0x73, 0x2c, 0x51, 0x25, 0x45, 0x27, 0x19, 0xba, 0x6a, 0x88, + 0xe7, 0x3e, 0xea, 0x59, 0x9e, 0x2f, 0x6b, 0xa1, 0x7c, 0xec, 0xa3, 0xaf, 0x72, 0x20, 0x96, 0xb8, + 0xeb, 0xcb, 0x3c, 0xcb, 0xf8, 0xe9, 0xfb, 0xb5, 0x99, 0x77, 0xdf, 0xaf, 0xcd, 0xbc, 0xf7, 0xbe, + 0xca, 0x38, 0xfe, 0x00, 0x00, 0xbb, 0x07, 0xdf, 0x23, 0x2d, 0x19, 0xbb, 0x53, 0xf5, 0x06, 0xc3, + 0x96, 0xb4, 0xe8, 0x0d, 0x66, 0x86, 0x32, 0x47, 0x0d, 0x87, 0x13, 0x94, 0x68, 0x1d, 0x8a, 0x51, + 0xd7, 0x4f, 0xf9, 0xc7, 0x52, 0xe8, 0x6f, 0x51, 0x6b, 0x10, 0xc7, 0x34, 0x89, 0x87, 0x24, 0x77, + 0xee, 0x43, 0xd2, 0x80, 0x6c, 0xcf, 0xb1, 0x55, 0xd5, 0xfd, 0x74, 0xf8, 0x90, 0xdf, 0xd9, 0x6e, + 0x9e, 0x0d, 0x6a, 0x8f, 0x4c, 0x6a, 0xb6, 0x07, 0xfd, 0x2e, 0x61, 0xf5, 0x3b, 0xdb, 0x4d, 0xcc, + 0x99, 0xc7, 0x45, 0xb5, 0xd9, 0x29, 0xa3, 0xda, 0x55, 0x80, 0x76, 0xdc, 0xbb, 0x90, 0x41, 0x23, + 0x72, 0x44, 0xad, 0x67, 0xa1, 0x51, 0x21, 0x06, 0x4b, 0x2d, 0x5e, 0xdf, 0xab, 0x1e, 0x02, 0x0b, + 0xac, 0x8e, 0xec, 0x86, 0x4e, 0x77, 0x27, 0x2e, 0x29, 0x35, 0x4b, 0x9b, 0xc3, 0xc2, 0xf0, 0xa8, + 0x7c, 0xe4, 0xc3, 0x92, 0xad, 0xca, 0xcc, 0x58, 0x69, 0x71, 0x6a, 0xa5, 0x22, 0x62, 0x35, 0x87, + 0x05, 0xe1, 0x51, 0xd9, 0xe8, 0xbb, 0xb0, 0x12, 0x02, 0x47, 0x6b, 0x7d, 0x11, 0xf5, 0xb3, 0x8d, + 0xd5, 0xd3, 
0x41, 0x6d, 0xa5, 0x39, 0x91, 0x0a, 0xdf, 0x47, 0x02, 0xb2, 0x61, 0xd6, 0x95, 0x59, + 0x72, 0x49, 0x64, 0x36, 0x5f, 0x4b, 0xb7, 0x8a, 0xd8, 0xfb, 0xeb, 0x7a, 0x76, 0x1c, 0xf5, 0x6d, + 0x54, 0x62, 0xac, 0x64, 0xa3, 0xb7, 0xa0, 0x64, 0x79, 0x9e, 0x1f, 0x58, 0xb2, 0xfb, 0x50, 0x16, + 0xaa, 0x36, 0xa6, 0x56, 0xb5, 0x11, 0xcb, 0x18, 0xca, 0xc6, 0x35, 0x0c, 0xd6, 0x55, 0xa1, 0x7b, + 0xb0, 0xe0, 0xdf, 0xf3, 0x08, 0xc5, 0xe4, 0x90, 0x50, 0xe2, 0xb5, 0x08, 0xab, 0x56, 0x84, 0xf6, + 0x67, 0x52, 0x6a, 0x4f, 0x30, 0xc7, 0x2e, 0x9d, 0x84, 0x33, 0x3c, 0xac, 0x05, 0xd5, 0x79, 0x6c, + 0xf5, 0x2c, 0xd7, 0xf9, 0x3e, 0xa1, 0xac, 0x3a, 0x1f, 0x37, 0xac, 0xb7, 0x22, 0x28, 0xd6, 0x28, + 0x50, 0x0f, 0x2a, 0x1d, 0xfd, 0xc9, 0xa8, 0x2e, 0x09, 0x33, 0xaf, 0xa5, 0x33, 0x73, 0xf4, 0x51, + 0x8b, 0xd3, 0xa0, 0x04, 0x0e, 0x27, 0xb5, 0xac, 0x3c, 0x07, 0xa5, 0x4f, 0x59, 0x21, 0xf0, 0x0a, + 0x63, 0xf8, 0x40, 0xa6, 0xaa, 0x30, 0xfe, 0x98, 0x81, 0xf9, 0xe4, 0x36, 0x0e, 0x3d, 0x87, 0xf9, + 0x54, 0xcf, 0x61, 0x58, 0xcb, 0x1a, 0x13, 0x27, 0x17, 0x61, 0x7c, 0xce, 0x4e, 0x8c, 0xcf, 0x2a, + 0x0c, 0xe6, 0x1e, 0x24, 0x0c, 0xd6, 0x01, 0x78, 0xb2, 0x42, 0x7d, 0xd7, 0x25, 0x54, 0x44, 0xc0, + 0x82, 0x9a, 0x50, 0x44, 0x50, 0xac, 0x51, 0xf0, 0x94, 0xfa, 0xc0, 0xf5, 0x5b, 0xc7, 0x62, 0x0b, + 0xc2, 0xdb, 0x2b, 0x62, 0x5f, 0x41, 0xa6, 0xd4, 0x8d, 0x11, 0x2c, 0x1e, 0xc3, 0x61, 0xf6, 0xe1, + 0xe2, 0x9e, 0x45, 0x79, 0x92, 0x13, 0xdf, 0x14, 0x51, 0xb3, 0xbc, 0x31, 0x52, 0x11, 0x3d, 0x3d, + 0xed, 0x8d, 0x8b, 0x37, 0x3f, 0x86, 0xc5, 0x55, 0x91, 0xf9, 0x57, 0x03, 0x2e, 0x8d, 0xd5, 0xfd, + 0x19, 0x54, 0x64, 0x6f, 0x24, 0x2b, 0xb2, 0xe7, 0x53, 0xb6, 0x32, 0xc7, 0x59, 0x3b, 0xa1, 0x3e, + 0x9b, 0x83, 0xfc, 0x1e, 0xcf, 0x84, 0xcd, 0x0f, 0x0d, 0x28, 0x8b, 0x5f, 0xd3, 0x74, 0x92, 0x6b, + 0xc9, 0x01, 0x43, 0xf1, 0xe1, 0x0d, 0x17, 0x1e, 0x46, 0xab, 0xf9, 0x1d, 0x03, 0x92, 0x3d, 0x5c, + 0xf4, 0xa2, 0xbc, 0x02, 0x46, 0xd4, 0x64, 0x9d, 0xd2, 0xfd, 0x5f, 0x98, 0x54, 0x92, 0x5e, 0x48, + 0xd5, 0xad, 0x7c, 0x12, 0x8a, 0xd8, 0xf7, 0x83, 0x3d, 0x2b, 0x38, 0x62, 0x7c, 0xef, 0xba, 0xfc, + 0x87, 0xda, 0x5e, 0xb1, 0x77, 0x02, 0x83, 0x25, 0xdc, 0xfc, 0xb9, 0x01, 0x97, 0x26, 0xce, 0x8d, + 0x78, 0x14, 0x69, 0x45, 0x5f, 0x6a, 0x45, 0x91, 0x23, 0xc7, 0x74, 0x58, 0xa3, 0xe2, 0xb5, 0x64, + 0x62, 0xd8, 0x34, 0x5c, 0x4b, 0x26, 0xb4, 0xe1, 0x24, 0xad, 0xf9, 0xcf, 0x0c, 0xa8, 0x41, 0xcd, + 0xff, 0xd8, 0xe9, 0x1f, 0x1f, 0x1a, 0x13, 0xcd, 0x27, 0xc7, 0x44, 0xd1, 0x4c, 0x48, 0x9b, 0x93, + 0x64, 0xef, 0x3f, 0x27, 0x41, 0xcf, 0x46, 0xa3, 0x17, 0xe9, 0x43, 0xab, 0xc9, 0xd1, 0xcb, 0xd9, + 0xa0, 0x56, 0x56, 0xc2, 0x93, 0xa3, 0x98, 0xd7, 0x60, 0xce, 0x26, 0x81, 0xe5, 0xb8, 0xb2, 0x2e, + 0x4c, 0x3d, 0x4c, 0x90, 0xc2, 0x9a, 0x92, 0xb5, 0x51, 0xe2, 0x36, 0xa9, 0x0f, 0x1c, 0x0a, 0xe4, + 0x01, 0xbb, 0xe5, 0xdb, 0xb2, 0x22, 0xc9, 0xc7, 0x01, 0x7b, 0xd3, 0xb7, 0x09, 0x16, 0x18, 0xf3, + 0x5d, 0x03, 0x4a, 0x52, 0xd2, 0xa6, 0xd5, 0x63, 0x04, 0x5d, 0x89, 0x56, 0x21, 0x8f, 0xfb, 0x92, + 0x3e, 0x63, 0x3b, 0x1b, 0xd4, 0x8a, 0x82, 0x4c, 0x14, 0x33, 0x63, 0x66, 0x49, 0x99, 0x73, 0xf6, + 0xe8, 0x51, 0xc8, 0x8b, 0x0b, 0xa4, 0x36, 0x33, 0x1e, 0x16, 0x72, 0x20, 0x96, 0x38, 0xf3, 0xe3, + 0x0c, 0x54, 0x12, 0x8b, 0x4b, 0x51, 0x17, 0x44, 0x2d, 0xd4, 0x4c, 0x8a, 0xb6, 0xfc, 0xe4, 0xd1, + 0xbc, 0x7a, 0xbe, 0x66, 0x1f, 0xe4, 0xf9, 0xfa, 0x36, 0xcc, 0xb6, 0xf8, 0x1e, 0x85, 0xff, 0xf4, + 0xb8, 0x32, 0xcd, 0x71, 0x8a, 0xdd, 0x8d, 0xbd, 0x51, 0x7c, 0x32, 0xac, 0x04, 0xa2, 0x9b, 0xb0, + 0x44, 0x49, 0x40, 0xfb, 0x1b, 0x87, 0x01, 0xa1, 0x7a, 0x33, 0x21, 0x1f, 0x67, 0xdf, 0x78, 0x98, + 0x00, 0x8f, 0xf2, 0x98, 0x07, 0x50, 
0xbe, 0x6d, 0x1d, 0xb8, 0xd1, 0x78, 0x0c, 0x43, 0xc5, 0xf1, + 0x5a, 0x6e, 0xcf, 0x26, 0x32, 0xa0, 0x87, 0xd1, 0x2b, 0xbc, 0xb4, 0xdb, 0x3a, 0xf2, 0x6c, 0x50, + 0xbb, 0x90, 0x00, 0xc8, 0x79, 0x10, 0x4e, 0x8a, 0x30, 0x5d, 0xc8, 0x7d, 0x86, 0x95, 0xe4, 0x77, + 0xa0, 0x18, 0xe7, 0xfa, 0x0f, 0x59, 0xa5, 0xf9, 0x06, 0x14, 0xb8, 0xc7, 0x87, 0x35, 0xea, 0x39, + 0x59, 0x52, 0x32, 0xf7, 0xca, 0xa4, 0xc9, 0xbd, 0xc4, 0x90, 0xf5, 0x4e, 0xd7, 0x7e, 0xc0, 0x21, + 0x6b, 0xe6, 0x41, 0x5e, 0xbe, 0xec, 0x94, 0x2f, 0xdf, 0x55, 0x90, 0x7f, 0x44, 0xe1, 0x8f, 0x8c, + 0x4c, 0x20, 0xb4, 0x47, 0x46, 0x7f, 0xff, 0xb5, 0x09, 0xc3, 0x8f, 0x0d, 0x00, 0xd1, 0xca, 0x13, + 0x6d, 0xa4, 0x14, 0xe3, 0xfc, 0x3b, 0x30, 0xeb, 0x4b, 0x8f, 0x94, 0x83, 0xd6, 0x29, 0xfb, 0xc5, + 0xd1, 0x45, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xc6, 0xcb, 0x1f, 0x7c, 0xb2, 0x3a, 0xf3, 0xe1, 0x27, + 0xab, 0x33, 0x1f, 0x7d, 0xb2, 0x3a, 0xf3, 0xf6, 0xe9, 0xaa, 0xf1, 0xc1, 0xe9, 0xaa, 0xf1, 0xe1, + 0xe9, 0xaa, 0xf1, 0xd1, 0xe9, 0xaa, 0xf1, 0xf1, 0xe9, 0xaa, 0xf1, 0xee, 0xdf, 0x57, 0x67, 0x5e, + 0x7b, 0x2c, 0xcd, 0x1f, 0xfc, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x28, 0x27, 0x65, 0xab, 0x20, + 0x28, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -2503,6 +2505,16 @@ func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SendInitialEvents != nil { + i-- + if *m.SendInitialEvents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } i -= len(m.ResourceVersionMatch) copy(dAtA[i:], m.ResourceVersionMatch) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersionMatch))) @@ -3908,6 +3920,9 @@ func (m *ListOptions) Size() (n int) { n += 2 l = len(m.ResourceVersionMatch) n += 1 + l + sovGenerated(uint64(l)) + if m.SendInitialEvents != nil { + n += 2 + } return n } @@ -4517,6 +4532,7 @@ func (this *ListOptions) String() string { `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, `ResourceVersionMatch:` + fmt.Sprintf("%v", this.ResourceVersionMatch) + `,`, + `SendInitialEvents:` + valueToStringGenerated(this.SendInitialEvents) + `,`, `}`, }, "") return s @@ -8250,6 +8266,27 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } m.ResourceVersionMatch = ResourceVersionMatch(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendInitialEvents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SendInitialEvents = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 2be188a6..48955dca 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -246,19 +246,16 @@ message CreateOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. 
Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -575,6 +572,32 @@ message ListOptions { // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. optional string continue = 8; + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. + // +optional + optional bool sendInitialEvents = 11; } // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource @@ -645,7 +668,7 @@ message ObjectMeta { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names // +optional optional string name = 1; @@ -671,7 +694,7 @@ message ObjectMeta { // // Must be a DNS_LABEL. // Cannot be updated. 
- // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces // +optional optional string namespace = 3; @@ -685,7 +708,7 @@ message ObjectMeta { // // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional optional string uid = 5; @@ -749,14 +772,14 @@ message ObjectMeta { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // +optional map labels = 11; // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations // +optional map annotations = 12; @@ -811,11 +834,11 @@ message OwnerReference { optional string kind = 1; // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names optional string name = 3; // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids optional string uid = 4; // If true, this reference points to the managing controller. @@ -889,19 +912,16 @@ message PatchOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -1024,7 +1044,7 @@ message StatusDetails { // UID of the resource. // (when there is a single resource which can be described). 
- // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional optional string uid = 6; @@ -1128,19 +1148,16 @@ message UpdateOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 152f9929..352d58eb 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -114,7 +114,7 @@ type ObjectMeta struct { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` @@ -140,7 +140,7 @@ type ObjectMeta struct { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces // +optional Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` @@ -154,7 +154,7 @@ type ObjectMeta struct { // // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` @@ -218,14 +218,14 @@ type ObjectMeta struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. 
- // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // +optional Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations // +optional Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` @@ -295,10 +295,10 @@ type OwnerReference struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // If true, this reference points to the managing controller. // +optional @@ -400,6 +400,32 @@ type ListOptions struct { // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. 
+ // +optional + SendInitialEvents *bool `json:"sendInitialEvents,omitempty" protobuf:"varint,11,opt,name=sendInitialEvents"` } // resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch @@ -542,19 +568,16 @@ type CreateOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -597,19 +620,16 @@ type PatchOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -674,19 +694,16 @@ type UpdateOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. 
This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -761,7 +778,7 @@ type StatusDetails struct { Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. // (when there is a single resource which can be described). - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // The Causes array includes more details associated with the StatusReason diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 9570726a..b736e837 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_APIGroup = map[string]string{ @@ -115,7 +115,7 @@ var map_CreateOptions = map[string]string{ "": "CreateOptions may be provided when creating an API object.", "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. 
The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -216,6 +216,7 @@ var map_ListOptions = map[string]string{ "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "sendInitialEvents": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", } func (ListOptions) SwaggerDoc() map[string]string { @@ -239,18 +240,18 @@ func (ManagedFieldsEntry) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces", "selfLink": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", - "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. 
This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", @@ -264,8 +265,8 @@ var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "controller": "If true, this reference points to the managing controller.", "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", } @@ -306,7 +307,7 @@ var map_PatchOptions = map[string]string{ "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. 
Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", } func (PatchOptions) SwaggerDoc() map[string]string { @@ -372,7 +373,7 @@ var map_StatusDetails = map[string]string{ "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "uid": "UID of the resource. (when there is a single resource which can be described). More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", } @@ -451,7 +452,7 @@ var map_UpdateOptions = map[string]string{ "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", "dryRun": "When present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go index b7590f0b..afe01ed5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -426,6 +426,13 @@ func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, } else { out.Continue = "" } + if values, ok := map[string][]string(*in)["sendInitialEvents"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.SendInitialEvents, s); err != nil { + return err + } + } else { + out.SendInitialEvents = nil + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 418e6099..7d29c504 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -602,6 +602,11 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { *out = new(int64) **out = **in } + if in.SendInitialEvents != nil { + in, out := &in.SendInitialEvents, &out.SendInitialEvents + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index ef7e7c1e..dff735dc 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PartialObjectMetadataList = map[string]string{ diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 8360d842..19d823ce 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -77,6 +77,8 @@ func (ls Set) AsValidatedSelector() (Selector, error) { // perform any validation. // According to our measurements this is significantly faster // in codepaths that matter at high scale. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func (ls Set) AsSelectorPreValidated() Selector { return SelectorFromValidatedSet(ls) } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 89100438..5e601424 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -149,14 +149,12 @@ type Requirement struct { // NewRequirement is the constructor for a Requirement. // If any of these rules is violated, an error is returned: -// (1) The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. 
-// (2) If the operator is In or NotIn, the values set must be non-empty. -// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. -// (4) If the operator is Exists or DoesNotExist, the value set must be empty. -// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. -// (6) The key is invalid due to its length, or sequence -// -// of characters. See validateLabelKey for more details. +// 1. The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. +// 2. If the operator is In or NotIn, the values set must be non-empty. +// 3. If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// 4. If the operator is Exists or DoesNotExist, the value set must be empty. +// 5. If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// 6. The key is invalid due to its length, or sequence of characters. See validateLabelKey for more details. // // The empty string is a valid value in the input values set. // Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList @@ -213,22 +211,15 @@ func (r *Requirement) hasValue(value string) bool { // Matches returns true if the Requirement matches the input Labels. // There is a match in the following cases: -// (1) The operator is Exists and Labels has the Requirement's key. -// (2) The operator is In, Labels has the Requirement's key and Labels' -// -// value for that key is in Requirement's value set. -// -// (3) The operator is NotIn, Labels has the Requirement's key and -// -// Labels' value for that key is not in Requirement's value set. -// -// (4) The operator is DoesNotExist or NotIn and Labels does not have the -// -// Requirement's key. -// -// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has -// -// the Requirement's key and the corresponding value satisfies mathematical inequality. +// 1. The operator is Exists and Labels has the Requirement's key. +// 2. The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// 3. The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// 4. The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// 5. The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. func (r *Requirement) Matches(ls Labels) bool { switch r.operator { case selection.In, selection.Equals, selection.DoubleEquals: @@ -872,15 +863,14 @@ func (p *Parser) parseExactValue() (sets.String, error) { // "x in (foo,,baz),y,z notin ()" // // Note: -// -// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the -// VALUEs in its requirement -// (2) Exclusion - " notin " - denotes that the KEY is not equal to any -// of the VALUEs in its requirement or does not exist -// (3) The empty string is a valid VALUE -// (4) A requirement with just a KEY - as in "y" above - denotes that -// the KEY exists and can be any VALUE. -// (5) A requirement with just !KEY requires that the KEY not exist. +// 1. Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// 2. 
Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// 3. The empty string is a valid VALUE +// 4. A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// 5. A requirement with just !KEY requires that the KEY not exist. func Parse(selector string, opts ...field.PathOption) (Selector, error) { parsedSelector, err := parse(selector, field.ToPath(opts...)) if err == nil { @@ -948,6 +938,8 @@ func ValidatedSelectorFromSet(ls Set) (Selector, error) { // SelectorFromValidatedSet returns a Selector which will match exactly the given Set. // A nil and empty Sets are considered equivalent to Everything(). // It assumes that Set is already validated and doesn't do any validation. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func SelectorFromValidatedSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} @@ -969,3 +961,76 @@ func SelectorFromValidatedSet(ls Set) Selector { func ParseToRequirements(selector string, opts ...field.PathOption) ([]Requirement, error) { return parse(selector, field.ToPath(opts...)) } + +// ValidatedSetSelector wraps a Set, allowing it to implement the Selector interface. Unlike +// Set.AsSelectorPreValidated (which copies the input Set), this type simply wraps the underlying +// Set. As a result, it is substantially more efficient. A nil and empty Sets are considered +// equivalent to Everything(). +// +// Callers MUST ensure the underlying Set is not mutated, and that it is already validated. If these +// constraints are not met, Set.AsValidatedSelector should be preferred +// +// None of the Selector methods mutate the underlying Set, but Add() and Requirements() convert to +// the less optimized version. +type ValidatedSetSelector Set + +func (s ValidatedSetSelector) Matches(labels Labels) bool { + for k, v := range s { + if !labels.Has(k) || v != labels.Get(k) { + return false + } + } + return true +} + +func (s ValidatedSetSelector) Empty() bool { + return len(s) == 0 +} + +func (s ValidatedSetSelector) String() string { + keys := make([]string, 0, len(s)) + for k := range s { + keys = append(keys, k) + } + // Ensure deterministic output + sort.Strings(keys) + b := strings.Builder{} + for i, key := range keys { + v := s[key] + b.Grow(len(key) + 2 + len(v)) + if i != 0 { + b.WriteString(",") + } + b.WriteString(key) + b.WriteString("=") + b.WriteString(v) + } + return b.String() +} + +func (s ValidatedSetSelector) Add(r ...Requirement) Selector { + return s.toFullSelector().Add(r...) 
+} + +func (s ValidatedSetSelector) Requirements() (requirements Requirements, selectable bool) { + return s.toFullSelector().Requirements() +} + +func (s ValidatedSetSelector) DeepCopySelector() Selector { + res := make(ValidatedSetSelector, len(s)) + for k, v := range s { + res[k] = v + } + return res +} + +func (s ValidatedSetSelector) RequiresExactMatch(label string) (value string, found bool) { + v, f := s[label] + return v, f +} + +func (s ValidatedSetSelector) toFullSelector() Selector { + return SelectorFromValidatedSet(Set(s)) +} + +var _ Selector = ValidatedSetSelector{} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index b21eb664..54ccb7a7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -191,8 +191,7 @@ func (gv GroupVersion) Identifier() string { // if none of the options match the group. It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// -// in fewer places. +// in fewer places. func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { for _, gvk := range kinds { if gvk.Group == gv.Group && gvk.Version == gv.Version { @@ -240,8 +239,7 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // GroupVersions can be used to represent a set of desired group versions. // TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// -// in fewer places. +// in fewer places. type GroupVersions []GroupVersion // Identifier implements runtime.GroupVersioner interface. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 18b25a99..a5b11671 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -118,8 +118,7 @@ func (s *Scheme) Converter() *conversion.Converter { // API group and version that would never be updated. // // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into -// -// every version with particular schemas. Resolve this method at that point. +// every version with particular schemas. Resolve this method at that point. func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { s.addObservedVersion(version) s.AddKnownTypes(version, types...) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 21944f2d..ff982084 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -259,8 +259,7 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { // invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). // // TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. -// -// All other callers will be forced to request a Codec directly. 
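The ValidatedSetSelector type added to labels/selector.go above wraps an already-validated Set without the copy performed by Set.AsSelectorPreValidated. A short usage sketch, based directly on the methods shown in this hunk (editor's example, not part of the vendored diff):

// Illustrative sketch (not part of this patch): match labels with the new
// ValidatedSetSelector. The caller must guarantee the wrapped Set is already
// validated and never mutated afterwards.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Wrap an existing, validated Set; no copy is made.
	sel := labels.ValidatedSetSelector{"app": "my-app", "tier": "backend"}

	podLabels := labels.Set{"app": "my-app", "tier": "backend", "extra": "ok"}
	fmt.Println(sel.Matches(podLabels)) // true: every selector key/value is present
	fmt.Println(sel.String())           // "app=my-app,tier=backend" (keys emitted in sorted order)
}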
+// All other callers will be forced to request a Codec directly. func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec { return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 44663318..25f955ed 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -147,7 +147,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } if d, ok := obj.(runtime.NestedObjectDecoder); ok { - if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { + if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{Decoder: c.decoder}); err != nil { if strictErr, ok := runtime.AsStrictDecodingError(err); ok { // save the strictDecodingError let and the caller decide what to do with it strictDecodingErrs = append(strictDecodingErrs, strictErr.Errors()...) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 3dc9a5a2..ce77c791 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -123,7 +123,7 @@ type Unknown struct { // Raw will hold the complete serialized object which couldn't be matched // with a registered type. Most likely, nothing should be done with this // except for passing it through the system. - Raw []byte `protobuf:"bytes,2,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,2,opt,name=raw"` // ContentEncoding is encoding used to encode 'Raw' data. // Unspecified means no encoding. ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index b19750f3..db18ce1c 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -37,3 +37,14 @@ const ( func (n NamespacedName) String() string { return n.Namespace + string(Separator) + n.Name } + +// MarshalLog emits a struct containing required key/value pair +func (n NamespacedName) MarshalLog() interface{} { + return struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` + }{ + Name: n.Name, + Namespace: n.Namespace, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 1f5a04fd..1b60d145 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -214,7 +214,7 @@ func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { return NewAggregate(result) } -// Reduce will return err or, if err is an Aggregate and only has one item, +// Reduce will return err or nil, if err is an Aggregate and only has one item, // the first item in the aggregate. 
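The MarshalLog method added to NamespacedName above returns a struct with separate name and namespace fields, which lines up with logr's Marshaler interface, so structured loggers can render the two keys individually rather than the "namespace/name" string form. A brief sketch under assumed logger wiring (editor's example, not part of the vendored diff):

// Illustrative sketch (assumed logger wiring, not part of this patch):
// structured logging of a NamespacedName via the new MarshalLog method.
package example

import (
	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/types"
)

func logTarget(log logr.Logger) {
	nn := types.NamespacedName{Namespace: "default", Name: "app-server"}
	// With a sink that honors logr.Marshaler, "key" is rendered roughly as
	// {"name":"app-server","namespace":"default"} instead of "default/app-server".
	log.Info("reconciling", "key", nn)
}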
func Reduce(err error) error { if agg, ok := err.(Aggregate); ok && err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index ca08f856..9b3c9c8d 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -32,7 +32,7 @@ func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer { return &lengthDelimitedFrameWriter{w: w} } -// Write writes a single frame to the nested writer, prepending it with the length in +// Write writes a single frame to the nested writer, prepending it with the length // in bytes of data (as a 4 byte, bigendian uint32). func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) { binary.BigEndian.PutUint32(w.h[:], uint32(len(data))) diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml new file mode 100644 index 00000000..a667e983 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml @@ -0,0 +1,7018 @@ +apiVersion: v1 +kind: Endpoints +metadata: + creationTimestamp: '2016-10-04T17:45:58Z' + labels: + app: my-app + name: app-server + namespace: default + resourceVersion: '184597135' + selfLink: /self/link + uid: 6826f086-8a5a-11e6-8d09-42010a800005 +subsets: +- addresses: + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0000 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0001 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0002 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0003 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0004 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0005 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0006 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0007 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0008 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0009 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0010 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0011 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0012 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - 
ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0013 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0014 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0015 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0016 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0017 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0018 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0019 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0020 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0021 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0022 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0023 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0024 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0025 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0026 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0027 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0028 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0029 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0030 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0031 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0032 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0033 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0034 + 
namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0035 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0036 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0037 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0038 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0039 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0040 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0041 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0042 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0043 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0044 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0045 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0046 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0047 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0048 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0049 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0050 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0051 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0052 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0053 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0054 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0055 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0056 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0057 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0058 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0059 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0060 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0061 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0062 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0063 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0064 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0065 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0066 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0067 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0068 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0069 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0070 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0071 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0072 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0073 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0074 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0075 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0076 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0077 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0078 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0079 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0080 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0081 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0082 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0083 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0084 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0085 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0086 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0087 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0088 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0089 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0090 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0091 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0092 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0093 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0094 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0095 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0096 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0097 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0098 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0099 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0100 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0101 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0102 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0103 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0104 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0105 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0106 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0107 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0108 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0109 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0110 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0111 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0112 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0113 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0114 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0115 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0116 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0117 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0118 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0119 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555
+      - ip: 10.0.0.1
+        targetRef:
+          kind: Pod
+          name: pod-name-1234-0120
+          namespace: default
+          resourceVersion: '1234567890'
+          uid: 11111111-2222-3333-4444-555555555555
[... the added fixture repeats this address entry verbatim for pod-name-1234-0121 through pod-name-1234-0524, differing only in the pod name ...]
+      - ip: 10.0.0.1
+        targetRef:
kind: Pod + name: pod-name-1234-0525 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0526 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0527 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0528 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0529 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0530 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0531 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0532 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0533 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0534 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0535 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0536 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0537 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0538 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0539 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0540 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0541 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0542 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0543 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0544 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0545 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0546 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0547 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0548 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0549 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0550 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0551 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0552 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0553 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0554 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0555 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0556 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0557 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0558 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0559 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0560 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0561 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0562 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0563 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0564 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0565 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0566 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0567 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0568 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0569 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0570 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0571 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0572 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0573 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0574 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0575 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0576 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0577 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0578 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0579 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0580 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0581 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0582 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0583 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0584 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0585 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0586 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0587 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0588 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0589 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0590 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0591 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0592 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0593 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0594 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0595 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0596 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0597 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0598 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0599 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0600 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0601 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0602 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0603 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0604 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0605 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0606 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0607 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0608 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0609 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0610 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0611 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0612 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0613 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0614 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0615 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0616 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0617 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0618 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0619 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0620 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0621 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0622 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0623 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0624 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0625 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0626 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0627 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0628 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0629 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0630 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0631 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0632 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0633 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0634 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0635 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0636 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0637 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0638 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0639 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0640 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0641 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0642 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0643 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0644 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0645 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0646 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0647 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0648 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0649 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0650 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0651 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0652 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0653 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0654 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0655 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0656 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0657 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0658 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0659 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0660 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0661 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0662 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0663 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0664 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0665 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0666 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0667 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0668 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0669 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0670 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0671 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0672 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0673 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0674 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0675 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0676 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0677 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0678 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0679 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0680 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0681 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0682 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0683 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0684 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0685 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0686 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0687 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0688 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0689 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0690 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0691 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0692 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0693 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0694 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0695 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0696 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0697 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0698 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0699 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0700 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0701 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0702 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0703 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0704 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0705 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0706 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0707 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0708 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0709 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0710 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0711 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0712 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0713 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0714 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0715 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0716 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0717 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0718 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0719 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0720 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0721 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0722 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0723 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0724 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0725 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0726 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0727 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0728 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0729 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0730 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0731 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0732 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0733 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0734 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0735 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0736 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0737 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0738 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0739 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0740 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0741 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0742 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0743 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0744 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0745 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0746 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0747 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0748 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0749 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0750 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0751 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0752 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0753 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0754 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0755 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0756 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0757 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0758 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0759 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0760 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0761 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0762 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0763 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0764 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0765 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0766 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0767 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0768 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0769 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0770 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0771 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0772 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0773 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0774 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0775 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0776 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0777 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0778 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0779 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0780 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0781 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0782 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0783 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0784 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0785 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0786 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0787 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0788 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0789 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0790 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0791 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0792 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0793 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0794 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0795 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0796 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0797 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0798 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0799 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0800 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0801 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0802 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0803 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0804 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0805 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0806 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0807 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0808 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0809 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0810 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0811 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0812 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0813 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0814 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0815 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0816 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0817 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0818 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0819 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0820 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0821 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0822 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0823 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0824 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0825 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0826 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0827 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0828 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0829 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0830 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0831 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0832 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0833 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0834 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0835 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0836 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0837 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0838 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0839 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0840 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0841 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0842 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0843 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0844 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0845 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0846 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0847 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0848 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0849 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0850 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0851 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0852 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0853 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0854 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0855 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0856 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0857 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0858 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0859 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0860 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0861 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0862 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0863 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0864 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0865 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0866 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0867 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0868 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0869 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0870 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0871 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0872 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0873 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0874 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0875 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0876 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0877 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0878 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0879 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0880 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0881 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0882 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0883 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0884 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0885 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0886 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0887 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0888 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0889 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0890 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0891 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0892 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0893 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0894 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0895 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0896 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0897 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0898 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0899 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0900 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0901 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0902 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0903 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0904 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0905 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0906 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0907 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0908 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0909 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0910 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0911 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0912 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0913 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0914 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0915 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0916 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0917 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0918 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0919 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0920 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0921 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0922 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0923 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0924 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0925 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0926 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0927 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0928 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0929 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0930 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0931 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0932 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0933 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0934 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0935 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0936 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0937 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0938 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0939 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0940 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0941 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0942 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0943 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0944 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0945 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0946 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0947 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0948 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0949 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0950 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0951 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0952 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0953 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0954 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0955 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0956 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0957 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0958 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0959 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0960 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0961 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0962 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0963 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0964 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0965 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0966 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0967 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0968 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0969 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0970 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0971 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0972 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0973 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0974 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0975 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0976 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0977 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0978 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0979 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0980 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0981 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0982 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0983 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0984 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0985 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0986 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0987 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0988 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0989 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0990 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0991 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0992 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0993 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0994 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0995 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0996 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0997 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0998 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0999 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + ports: + - name: port-name + port: 8080 + protocol: TCP + diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go new file mode 100644 index 00000000..978ffb3c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// FieldManager updates the managed fields and merges applied +// configurations. +type FieldManager = internal.FieldManager + +// NewDefaultFieldManager creates a new FieldManager that merges apply requests +// and update managed fields for other types of requests. +func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) { + f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) + if err != nil { + return nil, fmt.Errorf("failed to create field manager: %v", err) + } + return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil +} + +// NewDefaultCRDFieldManager creates a new FieldManager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. 
+func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) { + f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) + if err != nil { + return nil, fmt.Errorf("failed to create field manager: %v", err) + } + return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil +} + +func ValidateManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) error { + _, err := internal.DecodeManagedFields(encodedManagedFields) + return err +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go new file mode 100644 index 00000000..b75ef741 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "sync" + "time" +) + +// AtMostEvery will never run the method more than once every specified +// duration. +type AtMostEvery struct { + delay time.Duration + lastCall time.Time + mutex sync.Mutex +} + +// NewAtMostEvery creates a new AtMostEvery, that will run the method at +// most every given duration. +func NewAtMostEvery(delay time.Duration) *AtMostEvery { + return &AtMostEvery{ + delay: delay, + } +} + +// updateLastCall returns true if the lastCall time has been updated, +// false if it was too early. +func (s *AtMostEvery) updateLastCall() bool { + s.mutex.Lock() + defer s.mutex.Unlock() + if time.Since(s.lastCall) < s.delay { + return false + } + s.lastCall = time.Now() + return true +} + +// Do will run the method if enough time has passed, and return true. +// Otherwise, it does nothing and returns false. +func (s *AtMostEvery) Do(fn func()) bool { + if !s.updateLastCall() { + return false + } + fn() + return true +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go new file mode 100644 index 00000000..fa342ca1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type buildManagerInfoManager struct { + fieldManager Manager + groupVersion schema.GroupVersion + subresource string +} + +var _ Manager = &buildManagerInfoManager{} + +// NewBuildManagerInfoManager creates a new Manager that converts the manager name into a unique identifier +// combining operation and version for update requests, and just operation for apply requests. +func NewBuildManagerInfoManager(f Manager, gv schema.GroupVersion, subresource string) Manager { + return &buildManagerInfoManager{ + fieldManager: f, + groupVersion: gv, + subresource: subresource, + } +} + +// Update implements Manager. +func (f *buildManagerInfoManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *buildManagerInfoManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationApply) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) +} + +func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) { + managerInfo := metav1.ManagedFieldsEntry{ + Manager: prefix, + Operation: operation, + APIVersion: f.groupVersion.String(), + Subresource: f.subresource, + } + if managerInfo.Manager == "" { + managerInfo.Manager = "unknown" + } + return BuildManagerIdentifier(&managerInfo) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go new file mode 100644 index 00000000..8951932b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "fmt" + "sort" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type capManagersManager struct { + fieldManager Manager + maxUpdateManagers int + oldUpdatesManagerName string +} + +var _ Manager = &capManagersManager{} + +// NewCapManagersManager creates a new wrapped FieldManager which ensures that the number of managers from updates +// does not exceed maxUpdateManagers, by merging some of the oldest entries on each update. +func NewCapManagersManager(fieldManager Manager, maxUpdateManagers int) Manager { + return &capManagersManager{ + fieldManager: fieldManager, + maxUpdateManagers: maxUpdateManagers, + oldUpdatesManagerName: "ancient-changes", + } +} + +// Update implements Manager. +func (f *capManagersManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return object, managed, err + } + if managed, err = f.capUpdateManagers(managed); err != nil { + return nil, nil, fmt.Errorf("failed to cap update managers: %v", err) + } + return object, managed, nil +} + +// Apply implements Manager. +func (f *capManagersManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} + +// capUpdateManagers merges a number of the oldest update entries into versioned buckets, +// such that the number of entries from updates does not exceed f.maxUpdateManagers. +func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Managed, err error) { + // Gather all entries from updates + updaters := []string{} + for manager, fields := range managed.Fields() { + if !fields.Applied() { + updaters = append(updaters, manager) + } + } + if len(updaters) <= f.maxUpdateManagers { + return managed, nil + } + + // If we have more than the maximum, sort the update entries by time, oldest first. + sort.Slice(updaters, func(i, j int) bool { + iTime, jTime, iSeconds, jSeconds := managed.Times()[updaters[i]], managed.Times()[updaters[j]], int64(0), int64(0) + if iTime != nil { + iSeconds = iTime.Unix() + } + if jTime != nil { + jSeconds = jTime.Unix() + } + if iSeconds != jSeconds { + return iSeconds < jSeconds + } + return updaters[i] < updaters[j] + }) + + // Merge the oldest updaters with versioned bucket managers until the number of updaters is under the cap + versionToFirstManager := map[string]string{} + for i, length := 0, len(updaters); i < len(updaters) && length > f.maxUpdateManagers; i++ { + manager := updaters[i] + vs := managed.Fields()[manager] + time := managed.Times()[manager] + version := string(vs.APIVersion()) + + // Create a new manager identifier for the versioned bucket entry. + // The version for this manager comes from the version of the update being merged into the bucket. + bucket, err := BuildManagerIdentifier(&metav1.ManagedFieldsEntry{ + Manager: f.oldUpdatesManagerName, + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: version, + }) + if err != nil { + return managed, fmt.Errorf("failed to create bucket manager for version %v: %v", version, err) + } + + // Merge the fieldets if this is not the first time the version was seen. 
+ // Otherwise just record the manager name in versionToFirstManager + if first, ok := versionToFirstManager[version]; ok { + // If the bucket doesn't exists yet, create one. + if _, ok := managed.Fields()[bucket]; !ok { + s := managed.Fields()[first] + delete(managed.Fields(), first) + managed.Fields()[bucket] = s + } + + managed.Fields()[bucket] = fieldpath.NewVersionedSet(vs.Set().Union(managed.Fields()[bucket].Set()), vs.APIVersion(), vs.Applied()) + delete(managed.Fields(), manager) + length-- + + // Use the time from the update being merged into the bucket, since it is more recent. + managed.Times()[bucket] = time + } else { + versionToFirstManager[version] = manager + } + } + + return managed, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go new file mode 100644 index 00000000..8c044c91 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// NewConflictError returns an error including details on the requests apply conflicts +func NewConflictError(conflicts merge.Conflicts) *errors.StatusError { + causes := []metav1.StatusCause{} + for _, conflict := range conflicts { + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseTypeFieldManagerConflict, + Message: fmt.Sprintf("conflict with %v", printManager(conflict.Manager)), + Field: conflict.Path.String(), + }) + } + return errors.NewApplyConflict(causes, getConflictMessage(conflicts)) +} + +func getConflictMessage(conflicts merge.Conflicts) string { + if len(conflicts) == 1 { + return fmt.Sprintf("Apply failed with 1 conflict: conflict with %v: %v", printManager(conflicts[0].Manager), conflicts[0].Path) + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + uniqueManagers := []string{} + for manager := range m { + uniqueManagers = append(uniqueManagers, manager) + } + + // Print conflicts by sorted managers. 
+ sort.Strings(uniqueManagers) + + messages := []string{} + for _, manager := range uniqueManagers { + messages = append(messages, fmt.Sprintf("conflicts with %v:", printManager(manager))) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return fmt.Sprintf("Apply failed with %d conflicts: %s", len(conflicts), strings.Join(messages, "\n")) +} + +func printManager(manager string) string { + encodedManager := &metav1.ManagedFieldsEntry{} + if err := json.Unmarshal([]byte(manager), encodedManager); err != nil { + return fmt.Sprintf("%q", manager) + } + managerStr := fmt.Sprintf("%q", encodedManager.Manager) + if encodedManager.Subresource != "" { + managerStr = fmt.Sprintf("%s with subresource %q", managerStr, encodedManager.Subresource) + } + if encodedManager.Operation == metav1.ManagedFieldsOperationUpdate { + if encodedManager.Time == nil { + return fmt.Sprintf("%s using %v", managerStr, encodedManager.APIVersion) + } + return fmt.Sprintf("%s using %v at %v", managerStr, encodedManager.APIVersion, encodedManager.Time.UTC().Format(time.RFC3339)) + } + return managerStr +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go new file mode 100644 index 00000000..f3111d4b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go @@ -0,0 +1,206 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates +// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum. +// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries. +const DefaultMaxUpdateManagers int = 10 + +// DefaultTrackOnCreateProbability defines the default probability that the field management of an object +// starts being tracked from the object's creation, instead of from the first time the object is applied to. +const DefaultTrackOnCreateProbability float32 = 1 + +var atMostEverySecond = NewAtMostEvery(time.Second) + +// FieldManager updates the managed fields and merges applied +// configurations. +type FieldManager struct { + fieldManager Manager + subresource string +} + +// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields +// on update and apply requests. 
+func NewFieldManager(f Manager, subresource string) *FieldManager { + return &FieldManager{fieldManager: f, subresource: subresource} +} + +// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. +func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager { + return NewFieldManager( + NewLastAppliedUpdater( + NewLastAppliedManager( + NewProbabilisticSkipNonAppliedManager( + NewCapManagersManager( + NewBuildManagerInfoManager( + NewManagedFieldsUpdater( + NewStripMetaManager(f), + ), kind.GroupVersion(), subresource, + ), DefaultMaxUpdateManagers, + ), objectCreater, kind, DefaultTrackOnCreateProbability, + ), typeConverter, objectConverter, kind.GroupVersion()), + ), subresource, + ) +} + +func decodeLiveOrNew(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) { + liveAccessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, err + } + + // We take the managedFields of the live object in case the request tries to + // manually set managedFields via a subresource. + if ignoreManagedFieldsFromRequestObject { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + + // If the object doesn't have metadata, we should just return without trying to + // set the managedFields at all, so creates/updates/patches will work normally. + newAccessor, err := meta.Accessor(newObj) + if err != nil { + return nil, err + } + + if isResetManagedFields(newAccessor.GetManagedFields()) { + return NewEmptyManaged(), nil + } + + // If the managed field is empty or we failed to decode it, + // let's try the live object. This is to prevent clients who + // don't understand managedFields from deleting it accidentally. + managed, err := DecodeManagedFields(newAccessor.GetManagedFields()) + if err != nil || len(managed.Fields()) == 0 { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + return managed, nil +} + +func emptyManagedFieldsOnErr(managed Managed, err error) (Managed, error) { + if err != nil { + return NewEmptyManaged(), nil + } + return managed, nil +} + +// Update is used when the object has already been merged (non-apply +// use-case), and simply updates the managed fields in the output +// object. +func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) { + // First try to decode the managed fields provided in the update, + // This is necessary to allow directly updating managed fields. + isSubresource := f.subresource != "" + managed, err := decodeLiveOrNew(liveObj, newObj, isSubresource) + if err != nil { + return newObj, nil + } + + RemoveObjectManagedFields(newObj) + + if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} + +// UpdateNoErrors is the same as Update, but it will not return +// errors. If an error happens, the object is returned with +// managedFields cleared. 
+func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object { + obj, err := f.Update(liveObj, newObj, manager) + if err != nil { + atMostEverySecond.Do(func() { + ns, name := "unknown", "unknown" + if accessor, err := meta.Accessor(newObj); err == nil { + ns = accessor.GetNamespace() + name = accessor.GetName() + } + + klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind", + newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name) + }) + // Explicitly remove managedFields on failure, so that + // we can't have garbage in it. + RemoveObjectManagedFields(newObj) + return newObj + } + return obj +} + +// Returns true if the managedFields indicate that the user is trying to +// reset the managedFields, i.e. if the list is non-nil but empty, or if +// the list has one empty item. +func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool { + if len(managedFields) == 0 { + return managedFields != nil + } + + if len(managedFields) == 1 { + return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{}) + } + + return false +} + +// Apply is used when server-side apply is called, as it merges the +// object and updates the managed fields. +func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) { + // If the object doesn't have metadata, apply isn't allowed. + accessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, fmt.Errorf("couldn't get accessor: %v", err) + } + + // Decode the managed fields in the live object, since it isn't allowed in the patch. + managed, err := DecodeManagedFields(accessor.GetManagedFields()) + if err != nil { + return nil, fmt.Errorf("failed to decode managed fields: %v", err) + } + + object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + if conflicts, ok := err.(merge.Conflicts); ok { + return nil, NewConflictError(conflicts) + } + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go new file mode 100644 index 00000000..08186191 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// EmptyFields represents a set with no paths +// It looks like metav1.Fields{Raw: []byte("{}")} +var EmptyFields = func() metav1.FieldsV1 { + f, err := SetToFields(*fieldpath.NewSet()) + if err != nil { + panic("should never happen") + } + return f +}() + +// FieldsToSet creates a set paths from an input trie of fields +func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) { + err = s.FromJSON(bytes.NewReader(f.Raw)) + return s, err +} + +// SetToFields creates a trie of fields from an input set of paths +func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) { + f.Raw, err = s.ToJSON() + return f, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go new file mode 100644 index 00000000..b00b6b82 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go @@ -0,0 +1,50 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" +) + +// LastAppliedConfigAnnotation is the annotation used to store the previous +// configuration of a resource for use in a three way diff by UpdateApplyAnnotation. +// +// This is a copy of the corev1 annotation since we don't want to depend on the whole package. +const LastAppliedConfigAnnotation = "kubectl.kubernetes.io/last-applied-configuration" + +// SetLastApplied sets the last-applied annotation the given value in +// the object. +func SetLastApplied(obj runtime.Object, value string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[LastAppliedConfigAnnotation] = value + if err := apimachineryvalidation.ValidateAnnotationsSize(annotations); err != nil { + delete(annotations, LastAppliedConfigAnnotation) + } + accessor.SetAnnotations(annotations) + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go new file mode 100644 index 00000000..3f6cf882 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go @@ -0,0 +1,171 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type lastAppliedManager struct { + fieldManager Manager + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + groupVersion schema.GroupVersion +} + +var _ Manager = &lastAppliedManager{} + +// NewLastAppliedManager converts the client-side apply annotation to +// server-side apply managed fields +func NewLastAppliedManager(fieldManager Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, groupVersion schema.GroupVersion) Manager { + return &lastAppliedManager{ + fieldManager: fieldManager, + typeConverter: typeConverter, + objectConverter: objectConverter, + groupVersion: groupVersion, + } +} + +// Update implements Manager. +func (f *lastAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply will consider the last-applied annotation +// for upgrading an object managed by client-side apply to server-side apply +// without conflicts. +func (f *lastAppliedManager) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newLiveObj, newManaged, newErr := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + // Upgrade the client-side apply annotation only from kubectl server-side-apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ if manager != "kubectl" { + return newLiveObj, newManaged, newErr + } + + // Check if we have conflicts + if newErr == nil { + return newLiveObj, newManaged, newErr + } + conflicts, ok := newErr.(merge.Conflicts) + if !ok { + return newLiveObj, newManaged, newErr + } + conflictSet := conflictsToSet(conflicts) + + // Check if conflicts are allowed due to client-side apply, + // and if so, then force apply + allowedConflictSet, err := f.allowedConflictsFromLastApplied(liveObj) + if err != nil { + return newLiveObj, newManaged, newErr + } + if !conflictSet.Difference(allowedConflictSet).Empty() { + newConflicts := conflictsDifference(conflicts, allowedConflictSet) + return newLiveObj, newManaged, newConflicts + } + + return f.fieldManager.Apply(liveObj, newObj, managed, manager, true) +} + +func (f *lastAppliedManager) allowedConflictsFromLastApplied(liveObj runtime.Object) (*fieldpath.Set, error) { + var accessor, err = meta.Accessor(liveObj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // If there is no client-side apply annotation, then there is nothing to do + var annotations = accessor.GetAnnotations() + if annotations == nil { + return nil, fmt.Errorf("no last applied annotation") + } + var lastApplied, ok = annotations[LastAppliedConfigAnnotation] + if !ok || lastApplied == "" { + return nil, fmt.Errorf("no last applied annotation") + } + + liveObjVersioned, err := f.objectConverter.ConvertToVersion(liveObj, f.groupVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to versioned: %v", err) + } + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to typed: %v", err) + } + + var lastAppliedObj = &unstructured.Unstructured{Object: map[string]interface{}{}} + err = json.Unmarshal([]byte(lastApplied), lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to decode last applied obj: %v in '%s'", err, lastApplied) + } + + if lastAppliedObj.GetAPIVersion() != f.groupVersion.String() { + return nil, fmt.Errorf("expected version of last applied to match live object '%s', but got '%s': %v", f.groupVersion.String(), lastAppliedObj.GetAPIVersion(), err) + } + + lastAppliedObjTyped, err := f.typeConverter.ObjectToTyped(lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to convert last applied to typed: %v", err) + } + + lastAppliedObjFieldSet, err := lastAppliedObjTyped.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create fieldset for last applied object: %v", err) + } + + comparison, err := lastAppliedObjTyped.Compare(liveObjTyped) + if err != nil { + return nil, fmt.Errorf("failed to compare last applied object and live object: %v", err) + } + + // Remove fields in last applied that are different, added, or missing in + // the live object. + // Because last-applied fields don't match the live object fields, + // then we don't own these fields. + lastAppliedObjFieldSet = lastAppliedObjFieldSet. + Difference(comparison.Modified). + Difference(comparison.Added). 
+ Difference(comparison.Removed) + + return lastAppliedObjFieldSet, nil +} + +// TODO: replace with merge.Conflicts.ToSet() +func conflictsToSet(conflicts merge.Conflicts) *fieldpath.Set { + conflictSet := fieldpath.NewSet() + for _, conflict := range []merge.Conflict(conflicts) { + conflictSet.Insert(conflict.Path) + } + return conflictSet +} + +func conflictsDifference(conflicts merge.Conflicts, s *fieldpath.Set) merge.Conflicts { + newConflicts := []merge.Conflict{} + for _, conflict := range []merge.Conflict(conflicts) { + if !s.Has(conflict.Path) { + newConflicts = append(newConflicts, conflict) + } + } + return newConflicts +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go new file mode 100644 index 00000000..06e6c5d8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +type lastAppliedUpdater struct { + fieldManager Manager +} + +var _ Manager = &lastAppliedUpdater{} + +// NewLastAppliedUpdater sets the client-side apply annotation up to date with +// server-side apply managed fields +func NewLastAppliedUpdater(fieldManager Manager) Manager { + return &lastAppliedUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *lastAppliedUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// server-side apply managed fields +func (f *lastAppliedUpdater) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + liveObj, managed, err := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + if err != nil { + return liveObj, managed, err + } + + // Sync the client-side apply annotation only from kubectl server-side apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ // + // If the client-side apply annotation doesn't exist, + // then continue because we have no annotation to update + if manager == "kubectl" && hasLastApplied(liveObj) { + lastAppliedValue, err := buildLastApplied(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to build last-applied annotation: %v", err) + } + err = SetLastApplied(liveObj, lastAppliedValue) + if err != nil { + return nil, nil, fmt.Errorf("failed to set last-applied annotation: %v", err) + } + } + return liveObj, managed, err +} + +func hasLastApplied(obj runtime.Object) bool { + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + return false + } + lastApplied, ok := annotations[LastAppliedConfigAnnotation] + return ok && len(lastApplied) > 0 +} + +func buildLastApplied(obj runtime.Object) (string, error) { + obj = obj.DeepCopyObject() + + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // Remove the annotation from the object before encoding the object + var annotations = accessor.GetAnnotations() + delete(annotations, LastAppliedConfigAnnotation) + accessor.SetAnnotations(annotations) + + lastApplied, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return "", fmt.Errorf("couldn't encode object into last applied annotation: %v", err) + } + return string(lastApplied), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go new file mode 100644 index 00000000..9b4c2032 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go @@ -0,0 +1,248 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type ManagedInterface interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +type managedStruct struct { + fields fieldpath.ManagedFields + times map[string]*metav1.Time +} + +var _ ManagedInterface = &managedStruct{} + +// Fields implements ManagedInterface. +func (m *managedStruct) Fields() fieldpath.ManagedFields { + return m.fields +} + +// Times implements ManagedInterface. +func (m *managedStruct) Times() map[string]*metav1.Time { + return m.times +} + +// NewEmptyManaged creates an empty ManagedInterface. 
+func NewEmptyManaged() ManagedInterface { + return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{}) +} + +// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation. +func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface { + return &managedStruct{ + fields: f, + times: t, + } +} + +// RemoveObjectManagedFields removes the ManagedFields from the object +// before we merge so that it doesn't appear in the ManagedFields +// recursively. +func RemoveObjectManagedFields(obj runtime.Object) { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + accessor.SetManagedFields(nil) +} + +// EncodeObjectManagedFields converts and stores the fieldpathManagedFields into the objects ManagedFields +func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + encodedManagedFields, err := encodeManagedFields(managed) + if err != nil { + return fmt.Errorf("failed to convert back managed fields to API: %v", err) + } + accessor.SetManagedFields(encodedManagedFields) + + return nil +} + +// DecodeManagedFields converts ManagedFields from the wire format (api format) +// to the format used by sigs.k8s.io/structured-merge-diff +func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (ManagedInterface, error) { + managed := managedStruct{} + managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields)) + managed.times = make(map[string]*metav1.Time, len(encodedManagedFields)) + + for i, encodedVersionedSet := range encodedManagedFields { + switch encodedVersionedSet.Operation { + case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: + default: + return nil, fmt.Errorf("operation must be `Apply` or `Update`") + } + if len(encodedVersionedSet.APIVersion) < 1 { + return nil, fmt.Errorf("apiVersion must not be empty") + } + switch encodedVersionedSet.FieldsType { + case "FieldsV1": + // Valid case. + case "": + return nil, fmt.Errorf("missing fieldsType in managed fields entry %d", i) + default: + return nil, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i) + } + manager, err := BuildManagerIdentifier(&encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err) + } + managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err) + } + managed.times[manager] = encodedVersionedSet.Time + } + return &managed, nil +} + +// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry +func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) { + encodedManagerCopy := *encodedManager + + // Never include fields type in the manager identifier + encodedManagerCopy.FieldsType = "" + + // Never include the fields in the manager identifier + encodedManagerCopy.FieldsV1 = nil + + // Never include the time in the manager identifier + encodedManagerCopy.Time = nil + + // For appliers, don't include the APIVersion in the manager identifier, + // so it will always have the same manager identifier each time it applied. 
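+	// Update operations keep the APIVersion, so the same manager updating at
+	// different versions is tracked under distinct identifiers.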
+ if encodedManager.Operation == metav1.ManagedFieldsOperationApply { + encodedManagerCopy.APIVersion = "" + } + + // Use the remaining fields to build the manager identifier + b, err := json.Marshal(&encodedManagerCopy) + if err != nil { + return "", fmt.Errorf("error marshalling manager identifier: %v", err) + } + + return string(b), nil +} + +func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) { + fields := EmptyFields + if encodedVersionedSet.FieldsV1 != nil { + fields = *encodedVersionedSet.FieldsV1 + } + set, err := FieldsToSet(fields) + if err != nil { + return nil, fmt.Errorf("error decoding set: %v", err) + } + return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil +} + +// encodeManagedFields converts ManagedFields from the format used by +// sigs.k8s.io/structured-merge-diff to the wire format (api format) +func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) { + if len(managed.Fields()) == 0 { + return nil, nil + } + encodedManagedFields = []metav1.ManagedFieldsEntry{} + for manager := range managed.Fields() { + versionedSet := managed.Fields()[manager] + v, err := encodeManagerVersionedSet(manager, versionedSet) + if err != nil { + return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err) + } + if t, ok := managed.Times()[manager]; ok { + v.Time = t + } + encodedManagedFields = append(encodedManagedFields, *v) + } + return sortEncodedManagedFields(encodedManagedFields) +} + +func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) { + sort.Slice(encodedManagedFields, func(i, j int) bool { + p, q := encodedManagedFields[i], encodedManagedFields[j] + + if p.Operation != q.Operation { + return p.Operation < q.Operation + } + + pSeconds, qSeconds := int64(0), int64(0) + if p.Time != nil { + pSeconds = p.Time.Unix() + } + if q.Time != nil { + qSeconds = q.Time.Unix() + } + if pSeconds != qSeconds { + return pSeconds < qSeconds + } + + if p.Manager != q.Manager { + return p.Manager < q.Manager + } + + if p.APIVersion != q.APIVersion { + return p.APIVersion < q.APIVersion + } + return p.Subresource < q.Subresource + }) + + return encodedManagedFields, nil +} + +func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) { + encodedVersionedSet = &metav1.ManagedFieldsEntry{} + + // Get as many fields as we can from the manager identifier + err = json.Unmarshal([]byte(manager), encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err) + } + + // Get the APIVersion, Operation, and Fields from the VersionedSet + encodedVersionedSet.APIVersion = string(versionedSet.APIVersion()) + if versionedSet.Applied() { + encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply + } + encodedVersionedSet.FieldsType = "FieldsV1" + fields, err := SetToFields(*versionedSet.Set()) + if err != nil { + return nil, fmt.Errorf("error encoding set: %v", err) + } + encodedVersionedSet.FieldsV1 = &fields + + return encodedVersionedSet, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go 
b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go new file mode 100644 index 00000000..376eed6b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go @@ -0,0 +1,82 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type managedFieldsUpdater struct { + fieldManager Manager +} + +var _ Manager = &managedFieldsUpdater{} + +// NewManagedFieldsUpdater is responsible for updating the managedfields +// in the object, updating the time of the operation as necessary. For +// updates, it uses a hard-coded manager to detect if things have +// changed, and swaps back the correct manager after the operation is +// done. +func NewManagedFieldsUpdater(fieldManager Manager) Manager { + return &managedFieldsUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *managedFieldsUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + self := "current-operation" + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, self) + if err != nil { + return object, managed, err + } + + // If the current operation took any fields from anything, it means the object changed, + // so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager + if vs, ok := managed.Fields()[self]; ok { + delete(managed.Fields(), self) + + if previous, ok := managed.Fields()[manager]; ok { + managed.Fields()[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied()) + } else { + managed.Fields()[manager] = vs + } + + managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()} + } + + return object, managed, nil +} + +// Apply implements Manager. +func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) + if err != nil { + return object, managed, err + } + if object != nil { + managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()} + } else { + object = liveObj.DeepCopyObject() + RemoveObjectManagedFields(object) + } + return object, managed, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go new file mode 100644 index 00000000..05393610 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type Managed interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +// Manager updates the managed fields and merges applied configurations. +type Manager interface { + // Update is used when the object has already been merged (non-apply + // use-case), and simply updates the managed fields in the output + // object. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) + + // Apply is used when server-side apply is called, as it merges the + // object and updates the managed fields. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go new file mode 100644 index 00000000..1954d65d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +const ( + // Field indicates that the content of this path element is a field's name + Field = "f" + + // Value indicates that the content of this path element is a field's value + Value = "v" + + // Index indicates that the content of this path element is an index in an array + Index = "i" + + // Key indicates that the content of this path element is a key value map + Key = "k" + + // Separator separates the type of a path element from the contents + Separator = ":" +) + +// NewPathElement parses a serialized path element +func NewPathElement(s string) (fieldpath.PathElement, error) { + split := strings.SplitN(s, Separator, 2) + if len(split) < 2 { + return fieldpath.PathElement{}, fmt.Errorf("missing colon: %v", s) + } + switch split[0] { + case Field: + return fieldpath.PathElement{ + FieldName: &split[1], + }, nil + case Value: + val, err := value.FromJSON([]byte(split[1])) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Value: &val, + }, nil + case Index: + i, err := strconv.Atoi(split[1]) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Index: &i, + }, nil + case Key: + kv := map[string]json.RawMessage{} + err := json.Unmarshal([]byte(split[1]), &kv) + if err != nil { + return fieldpath.PathElement{}, err + } + fields := value.FieldList{} + for k, v := range kv { + b, err := json.Marshal(v) + if err != nil { + return fieldpath.PathElement{}, err + } + val, err := value.FromJSON(b) + if err != nil { + return fieldpath.PathElement{}, err + } + + fields = append(fields, value.Field{ + Name: k, + Value: val, + }) + } + return fieldpath.PathElement{ + Key: &fields, + }, nil + default: + // Ignore unknown key types + return fieldpath.PathElement{}, nil + } +} + +// PathElementString serializes a path element +func PathElementString(pe fieldpath.PathElement) (string, error) { + switch { + case pe.FieldName != nil: + return Field + Separator + *pe.FieldName, nil + case pe.Key != nil: + kv := map[string]json.RawMessage{} + for _, k := range *pe.Key { + b, err := value.ToJSON(k.Value) + if err != nil { + return "", err + } + m := json.RawMessage{} + err = json.Unmarshal(b, &m) + if err != nil { + return "", err + } + kv[k.Name] = m + } + b, err := json.Marshal(kv) + if err != nil { + return "", err + } + return Key + ":" + string(b), nil + case pe.Value != nil: + b, err := value.ToJSON(*pe.Value) + if err != nil { + return "", err + } + return Value + ":" + string(b), nil + case pe.Index != nil: + return Index + ":" + strconv.Itoa(*pe.Index), nil + default: + return "", errors.New("Invalid type of path element") + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go new file mode 100644 index 00000000..6b281ec1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + "math/rand" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type skipNonAppliedManager struct { + fieldManager Manager + objectCreater runtime.ObjectCreater + gvk schema.GroupVersionKind + beforeApplyManagerName string + probability float32 +} + +var _ Manager = &skipNonAppliedManager{} + +// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply. +func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind) Manager { + return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, gvk, 0.0) +} + +// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply, +// or starts tracking on create with p probability. +func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind, p float32) Manager { + return &skipNonAppliedManager{ + fieldManager: fieldManager, + objectCreater: objectCreater, + gvk: gvk, + beforeApplyManagerName: "before-first-apply", + probability: p, + } +} + +// Update implements Manager. +func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + accessor, err := meta.Accessor(liveObj) + if err != nil { + return newObj, managed, nil + } + + // If managed fields is empty, we need to determine whether to skip tracking managed fields. + if len(managed.Fields()) == 0 { + // Check if the operation is a create, by checking whether lastObj's UID is empty. + // If the operation is create, P(tracking managed fields) = f.probability + // If the operation is update, skip tracking managed fields, since we already know managed fields is empty. + if len(accessor.GetUID()) == 0 { + if f.probability <= rand.Float32() { + return newObj, managed, nil + } + } else { + return newObj, managed, nil + } + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. 
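+// On the first apply (no managed fields yet), the live object's existing
+// fields are first attributed to the "before-first-apply" manager via an
+// Update against an empty object, so conflicts with pre-existing values can
+// still be detected and reported.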
+func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if len(managed.Fields()) == 0 { + emptyObj, err := f.objectCreater.New(f.gvk) + if err != nil { + return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", f.gvk, err) + } + liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName) + if err != nil { + return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err) + } + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go new file mode 100644 index 00000000..9b61f3a6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type stripMetaManager struct { + fieldManager Manager + + // stripSet is the list of fields that should never be part of a mangedFields. + stripSet *fieldpath.Set +} + +var _ Manager = &stripMetaManager{} + +// NewStripMetaManager creates a new Manager that strips metadata and typemeta fields from the manager's fieldset. +func NewStripMetaManager(fieldManager Manager) Manager { + return &stripMetaManager{ + fieldManager: fieldManager, + stripSet: fieldpath.NewSet( + fieldpath.MakePathOrDie("apiVersion"), + fieldpath.MakePathOrDie("kind"), + fieldpath.MakePathOrDie("metadata"), + fieldpath.MakePathOrDie("metadata", "name"), + fieldpath.MakePathOrDie("metadata", "namespace"), + fieldpath.MakePathOrDie("metadata", "creationTimestamp"), + fieldpath.MakePathOrDie("metadata", "selfLink"), + fieldpath.MakePathOrDie("metadata", "uid"), + fieldpath.MakePathOrDie("metadata", "clusterName"), + fieldpath.MakePathOrDie("metadata", "generation"), + fieldpath.MakePathOrDie("metadata", "managedFields"), + fieldpath.MakePathOrDie("metadata", "resourceVersion"), + ), + } +} + +// Update implements Manager. +func (f *stripMetaManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// Apply implements Manager. 
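+// As with Update, the call is delegated to the wrapped manager and the
+// stripSet paths (apiVersion, kind and the metadata identity fields) are then
+// removed from the applier's field set.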
+func (f *stripMetaManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// stripFields removes a predefined set of paths found in typed from managed +func (f *stripMetaManager) stripFields(managed fieldpath.ManagedFields, manager string) { + vs, ok := managed[manager] + if ok { + if vs == nil { + panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager)) + } + newSet := vs.Set().Difference(f.stripSet) + if newSet.Empty() { + delete(managed, manager) + } else { + managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied()) + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go new file mode 100644 index 00000000..2112c9ab --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -0,0 +1,186 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type structuredMergeManager struct { + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + objectDefaulter runtime.ObjectDefaulter + groupVersion schema.GroupVersion + hubVersion schema.GroupVersion + updater merge.Updater +} + +var _ Manager = &structuredMergeManager{} + +// NewStructuredMergeManager creates a new Manager that merges apply requests +// and update managed fields for other types of requests. +func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) { + if typeConverter == nil { + return nil, fmt.Errorf("typeconverter must not be nil") + } + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s + IgnoredFields: resetFields, + }, + }, nil +} + +// NewCRDStructuredMergeManager creates a new Manager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. 
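+// It wires a CRD-specific version converter into the updater, which converts
+// objects straight to the hub version rather than deriving it per group.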
+func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) { + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), + IgnoredFields: resetFields, + }, + }, nil +} + +func objectGVKNN(obj runtime.Object) string { + name := "" + namespace := "" + if accessor, err := meta.Accessor(obj); err == nil { + name = accessor.GetName() + namespace = accessor.GetNamespace() + } + + return fmt.Sprintf("%v/%v; %v", namespace, name, obj.GetObjectKind().GroupVersionKind()) +} + +// Update implements Manager. +func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version (%v): %v", objectGVKNN(newObj), f.groupVersion, err) + } + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) + } + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err) + } + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + + // TODO(apelisse) use the first return value when unions are implemented + _, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), manager) + if err != nil { + return nil, nil, fmt.Errorf("failed to update ManagedFields (%v): %v", objectGVKNN(newObjVersioned), err) + } + managed = NewManaged(managedFields, managed.Times()) + + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + // Check that the patch object has the same version as the live object + if patchVersion := patchObj.GetObjectKind().GroupVersionKind().GroupVersion(); patchVersion != f.groupVersion { + return nil, nil, + errors.NewBadRequest( + fmt.Sprintf("Incorrect version specified in apply patch. 
"+ + "Specified patch version: %s, expected: %s", + patchVersion, f.groupVersion)) + } + + patchObjMeta, err := meta.Accessor(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("couldn't get accessor: %v", err) + } + if patchObjMeta.GetManagedFields() != nil { + return nil, nil, errors.NewBadRequest("metadata.managedFields must be nil") + } + + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) + } + + patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err) + } + + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields(), manager, force) + if err != nil { + return nil, nil, err + } + managed = NewManaged(managedFields, managed.Times()) + + if newObjTyped == nil { + return nil, managed, nil + } + + newObj, err := f.typeConverter.TypedToObject(newObjTyped) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new typed object (%v) to object: %v", objectGVKNN(patchObj), err) + } + + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version: %v", objectGVKNN(patchObj), err) + } + f.objectDefaulter.Default(newObjVersioned) + + newObjUnversioned, err := f.toUnversioned(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert to unversioned (%v): %v", objectGVKNN(patchObj), err) + } + return newObjUnversioned, managed, nil +} + +func (f *structuredMergeManager) toVersioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.groupVersion) +} + +func (f *structuredMergeManager) toUnversioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.hubVersion) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go new file mode 100644 index 00000000..1ac96d7f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go @@ -0,0 +1,193 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/schemaconv" + "k8s.io/kube-openapi/pkg/validation/spec" + smdschema "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. +type TypeConverter interface { + ObjectToTyped(runtime.Object) (*typed.TypedValue, error) + TypedToObject(*typed.TypedValue) (runtime.Object, error) +} + +type typeConverter struct { + parser map[schema.GroupVersionKind]*typed.ParseableType +} + +var _ TypeConverter = &typeConverter{} + +func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) { + typeSchema, err := schemaconv.ToSchemaFromOpenAPI(openapiSpec, preserveUnknownFields) + if err != nil { + return nil, fmt.Errorf("failed to convert models to schema: %v", err) + } + + typeParser := typed.Parser{Schema: smdschema.Schema{Types: typeSchema.Types}} + tr := indexModels(&typeParser, openapiSpec) + + return &typeConverter{parser: tr}, nil +} + +func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + t := c.parser[gvk] + if t == nil { + return nil, NewNoCorrespondingTypeError(gvk) + } + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent()) + default: + return t.FromStructured(obj) + } +} + +func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +type deducedTypeConverter struct{} + +// DeducedTypeConverter is a TypeConverter for CRDs that don't have a +// schema. It does implement the same interface though (and create the +// same types of objects), so that everything can still work the same. +// CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return deducedTypeConverter{} +} + +// ObjectToTyped converts an object into a TypedValue with a "deduced type". +func (deducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + switch o := obj.(type) { + case *unstructured.Unstructured: + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) + default: + return typed.DeducedParseableType.FromStructured(obj) + } +} + +// TypedToObject transforms the typed value into a runtime.Object. That +// is not specific to deduced type. 
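+// The deduced parser produces plain unstructured content, so the returned
+// object is an *unstructured.Unstructured (non-map values yield an error).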
+func (deducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +func valueToObject(val value.Value) (runtime.Object, error) { + vu := val.Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +func indexModels( + typeParser *typed.Parser, + openAPISchemas map[string]*spec.Schema, +) map[schema.GroupVersionKind]*typed.ParseableType { + tr := map[schema.GroupVersionKind]*typed.ParseableType{} + for modelName, model := range openAPISchemas { + gvkList := parseGroupVersionKind(model.Extensions) + if len(gvkList) == 0 { + continue + } + + parsedType := typeParser.Type(modelName) + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + tr[schema.GroupVersionKind(gvk)] = &parsedType + } + } + } + return tr +} + +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. +func parseGroupVersionKind(extensions map[string]interface{}) []schema.GroupVersionKind { + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions["x-kubernetes-group-version-kind"] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + var group, version, kind string + + // gvk extension list must be a map with group, version, and + // kind fields + if gvkMap, ok := gvk.(map[interface{}]interface{}); ok { + group, ok = gvkMap["group"].(string) + if !ok { + continue + } + version, ok = gvkMap["version"].(string) + if !ok { + continue + } + kind, ok = gvkMap["kind"].(string) + if !ok { + continue + } + + } else if gvkMap, ok := gvk.(map[string]interface{}); ok { + group, ok = gvkMap["group"].(string) + if !ok { + continue + } + version, ok = gvkMap["version"].(string) + if !ok { + continue + } + kind, ok = gvkMap["kind"].(string) + if !ok { + continue + } + } else { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go new file mode 100644 index 00000000..45855fa4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go @@ -0,0 +1,123 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// versionConverter is an implementation of +// sigs.k8s.io/structured-merge-diff/merge.Converter +type versionConverter struct { + typeConverter TypeConverter + objectConvertor runtime.ObjectConvertor + hubGetter func(from schema.GroupVersion) schema.GroupVersion +} + +var _ merge.Converter = &versionConverter{} + +// NewVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor. +func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return schema.GroupVersion{ + Group: from.Group, + Version: h.Version, + } + }, + } +} + +// NewCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor. +func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return h + }, + } +} + +// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter +func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) { + // Convert the smd typed value to a kubernetes object. + objectToConvert, err := v.typeConverter.TypedToObject(object) + if err != nil { + return object, err + } + + // Parse the target groupVersion. + groupVersion, err := schema.ParseGroupVersion(string(version)) + if err != nil { + return object, err + } + + // If attempting to convert to the same version as we already have, just return it. + fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion() + if fromVersion == groupVersion { + return object, nil + } + + // Convert to internal + internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion)) + if err != nil { + return object, err + } + + // Convert the object into the target version + convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion) + if err != nil { + return object, err + } + + // Convert the object back to a smd typed value and return it. 
+ return v.typeConverter.ObjectToTyped(convertedObject) +} + +// IsMissingVersionError +func (v *versionConverter) IsMissingVersionError(err error) bool { + return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err) +} + +type noCorrespondingTypeErr struct { + gvk schema.GroupVersionKind +} + +func NewNoCorrespondingTypeError(gvk schema.GroupVersionKind) error { + return &noCorrespondingTypeErr{gvk: gvk} +} + +func (k *noCorrespondingTypeErr) Error() string { + return fmt.Sprintf("no corresponding type for %v", k.gvk) +} + +func isNoCorrespondingTypeError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*noCorrespondingTypeErr) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml new file mode 100644 index 00000000..66e849f2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml @@ -0,0 +1,261 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + container.googleapis.com/instance_id: "123456789321654789" + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2019-07-09T16:17:29Z" + labels: + kubernetes.io/arch: amd64 + beta.kubernetes.io/fluentd-ds-ready: "true" + beta.kubernetes.io/instance-type: n1-standard-4 + kubernetes.io/os: linux + cloud.google.com/gke-nodepool: default-pool + cloud.google.com/gke-os-distribution: cos + failure-domain.beta.kubernetes.io/region: us-central1 + failure-domain.beta.kubernetes.io/zone: us-central1-b + topology.kubernetes.io/region: us-central1 + topology.kubernetes.io/zone: us-central1-b + kubernetes.io/hostname: node-default-pool-something + name: node-default-pool-something + resourceVersion: "211582541" + selfLink: /api/v1/nodes/node-default-pool-something + uid: 0c24d0e1-a265-11e9-abe4-42010a80026b +spec: + podCIDR: 10.0.0.1/24 + providerID: some-provider-id-of-some-sort +status: + addresses: + - address: 10.0.0.1 + type: InternalIP + - address: 192.168.0.1 + type: ExternalIP + - address: node-default-pool-something + type: Hostname + allocatable: + cpu: 3920m + ephemeral-storage: "104638878617" + hugepages-2Mi: "0" + memory: 12700100Ki + pods: "110" + capacity: + cpu: "4" + ephemeral-storage: 202086868Ki + hugepages-2Mi: "0" + memory: 15399364Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:08Z" + message: containerd is functioning properly + reason: FrequentContainerdRestart + status: "False" + type: FrequentContainerdRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker overlay2 is functioning properly + reason: CorruptDockerOverlay2 + status: "False" + type: CorruptDockerOverlay2 + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: node is functioning properly + reason: UnregisterNetDevice + status: "False" + type: FrequentUnregisterNetDevice + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: kernel has no deadlock + reason: KernelHasNoDeadlock + status: "False" + type: KernelDeadlock + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: Filesystem is not read-only + reason: FilesystemIsNotReadOnly + status: "False" + type: ReadonlyFilesystem + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:05Z" + message: 
kubelet is functioning properly + reason: FrequentKubeletRestart + status: "False" + type: FrequentKubeletRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker is functioning properly + reason: FrequentDockerRestart + status: "False" + type: FrequentDockerRestart + - lastHeartbeatTime: "2019-07-09T16:17:47Z" + lastTransitionTime: "2019-07-09T16:17:47Z" + message: RouteController created a route + reason: RouteCreated + status: "False" + type: NetworkUnavailable + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient disk space available + reason: KubeletHasSufficientDisk + status: "False" + type: OutOfDisk + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:49Z" + message: kubelet is posting ready status. AppArmor enabled + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8 + - grafana/grafana:4.4.2 + sizeBytes: 287008013 + - names: + - registry.k8s.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b + - registry.k8s.io/node-problem-detector:v0.4.1 + sizeBytes: 286572743 + - names: + - grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033 + - grafana/grafana:4.2.0 + sizeBytes: 277940263 + - names: + - influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc + - influxdb:1.2.2 + sizeBytes: 223948571 + - names: + - gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2 + - gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1 + sizeBytes: 223242132 + - names: + - nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b + - nginx:1.10.1 + sizeBytes: 180708613 + - names: + - registry.k8s.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73 + - registry.k8s.io/fluentd-elasticsearch:v2.0.4 + sizeBytes: 135716379 + - names: + - nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f + - nginx:latest + sizeBytes: 109357355 + - names: + - registry.k8s.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0 + - registry.k8s.io/kubernetes-dashboard-amd64:v1.8.3 + sizeBytes: 102319441 + - names: + - gcr.io/google_containers/kube-proxy:v1.11.10-gke.5 + - registry.k8s.io/kube-proxy:v1.11.10-gke.5 + sizeBytes: 102279340 + - names: + - registry.k8s.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516 + - registry.k8s.io/event-exporter:v0.2.3 + sizeBytes: 94171943 + - names: + - 
registry.k8s.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662 + - registry.k8s.io/prometheus-to-sd:v0.3.1 + sizeBytes: 88077694 + - names: + - registry.k8s.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff + - registry.k8s.io/fluentd-gcp-scaler:0.5.1 + sizeBytes: 86637208 + - names: + - registry.k8s.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521 + - registry.k8s.io/heapster-amd64:v1.6.0-beta.1 + sizeBytes: 76016169 + - names: + - registry.k8s.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52 + - registry.k8s.io/ingress-glbc-amd64:v1.1.1 + sizeBytes: 67801919 + - names: + - registry.k8s.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09 + - registry.k8s.io/kube-addon-manager:v8.7 + sizeBytes: 63322109 + - names: + - nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315 + - nginx:1.10-alpine + sizeBytes: 54042627 + - names: + - registry.k8s.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869 + - registry.k8s.io/cpvpa-amd64:v0.6.0 + sizeBytes: 51785854 + - names: + - registry.k8s.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d + - registry.k8s.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 + sizeBytes: 49648481 + - names: + - registry.k8s.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103 + - registry.k8s.io/ip-masq-agent-amd64:v2.1.1 + sizeBytes: 49612505 + - names: + - registry.k8s.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8 + - registry.k8s.io/k8s-dns-kube-dns-amd64:1.14.10 + sizeBytes: 49549457 + - names: + - registry.k8s.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450 + - registry.k8s.io/rescheduler:v0.4.0 + sizeBytes: 48973149 + - names: + - registry.k8s.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc + - registry.k8s.io/event-exporter:v0.2.4 + sizeBytes: 47261019 + - names: + - registry.k8s.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848 + - registry.k8s.io/coredns:1.1.3 + sizeBytes: 45587362 + - names: + - prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74 + - prom/prometheus:v1.1.3 + sizeBytes: 45493941 + nodeInfo: + architecture: amd64 + bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61 + containerRuntimeVersion: docker://17.3.2 + kernelVersion: 4.14.127+ + kubeProxyVersion: v1.11.10-gke.5 + kubeletVersion: v1.11.10-gke.5 + machineID: 1739555e5b231057f0f9a0b5fa29511b + operatingSystem: linux + osImage: Container-Optimized OS from Google + systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B + volumesAttached: + - devicePath: /dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - devicePath: 
/dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + volumesInUse: + - kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml new file mode 100644 index 00000000..3fb0877d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml @@ -0,0 +1,121 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: some-app + plugin1: some-value + plugin2: some-value + plugin3: some-value + plugin4: some-value + name: some-name + namespace: default + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: some-name + uid: 0a9d2b9e-779e-11e7-b422-42010a8001be +spec: + containers: + - args: + - one + - two + - three + - four + - five + - six + - seven + - eight + - nine + env: + - name: VAR_3 + valueFrom: + secretKeyRef: + key: some-other-key + name: some-oher-name + - name: VAR_2 + valueFrom: + secretKeyRef: + key: other-key + name: other-name + - name: VAR_1 + valueFrom: + secretKeyRef: + key: some-key + name: some-name + image: some-image-name + imagePullPolicy: IfNotPresent + name: some-name + resources: + requests: + cpu: '0' + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-hu5jz + readOnly: true + dnsPolicy: ClusterFirst + nodeName: node-name + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: default-token-hu5jz + secret: + defaultMode: 420 + secretName: default-token-hu5jz +status: + conditions: + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: Initialized + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:41:59Z' + status: 'True' + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: 'True' + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: PodScheduled + containerStatuses: + - containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376 + image: some-image-name + imageID: 
docker-pullable://some-image-id + lastState: + terminated: + containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565 + exitCode: 255 + finishedAt: '2019-07-08T09:39:09Z' + reason: Error + startedAt: '2019-07-08T09:38:54Z' + name: name + ready: true + restartCount: 6 + state: + running: + startedAt: '2019-07-08T09:41:59Z' + hostIP: 10.0.0.1 + phase: Running + podIP: 10.0.0.1 + qosClass: BestEffort + startTime: '2019-07-08T09:31:18Z' diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go new file mode 100644 index 00000000..48b774ce --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go @@ -0,0 +1,174 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +var ( + scaleGroupVersion = schema.GroupVersion{Group: "autoscaling", Version: "v1"} + replicasPathInScale = fieldpath.MakePathOrDie("spec", "replicas") +) + +// ResourcePathMappings maps a group/version to its replicas path. The +// assumption is that all the paths correspond to leaf fields. +type ResourcePathMappings map[string]fieldpath.Path + +// ScaleHandler manages the conversion of managed fields between a main +// resource and the scale subresource +type ScaleHandler struct { + parentEntries []metav1.ManagedFieldsEntry + groupVersion schema.GroupVersion + mappings ResourcePathMappings +} + +// NewScaleHandler creates a new ScaleHandler +func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler { + return &ScaleHandler{ + parentEntries: parentEntries, + groupVersion: groupVersion, + mappings: mappings, + } +} + +// ToSubresource filter the managed fields of the main resource and convert +// them so that they can be handled by scale. +// For the managed fields that have a replicas path it performs two changes: +// 1. APIVersion is changed to the APIVersion of the scale subresource +// 2. 
Replicas path of the main resource is transformed to the replicas path of +// the scale subresource +func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) { + managed, err := internal.DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + for manager, versionedSet := range managed.Fields() { + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Skip the entry if the APIVersion is unknown + if !ok || path == nil { + continue + } + + if versionedSet.Set().Has(path) { + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(replicasPathInScale), + fieldpath.APIVersion(scaleGroupVersion.String()), + versionedSet.Applied(), + ) + + f[manager] = newVersionedSet + t[manager] = managed.Times()[manager] + } + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +// ToParent merges `scaleEntries` with the entries of the main resource and +// transforms them accordingly +func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) { + decodedParentEntries, err := internal.DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + parentFields := decodedParentEntries.Fields() + + decodedScaleEntries, err := internal.DecodeManagedFields(scaleEntries) + if err != nil { + return nil, err + } + scaleFields := decodedScaleEntries.Fields() + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + + for manager, versionedSet := range parentFields { + // Get the main resource "replicas" path + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Drop the entry if the APIVersion is unknown. + if !ok { + continue + } + + // If the parent entry does not have the replicas path or it is nil, just + // keep it as it is. The path is nil for Custom Resources without scale + // subresource. + if path == nil || !versionedSet.Set().Has(path) { + f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + continue + } + + if _, ok := scaleFields[manager]; !ok { + // "Steal" the replicas path from the main resource entry + newSet := versionedSet.Set().Difference(fieldpath.NewSet(path)) + + if !newSet.Empty() { + newVersionedSet := fieldpath.NewVersionedSet( + newSet, + versionedSet.APIVersion(), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + } else { + // Field wasn't stolen, let's keep the entry as it is. 
+ f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + delete(scaleFields, manager) + } + } + + for manager, versionedSet := range scaleFields { + if !versionedSet.Set().Has(replicasPathInScale) { + continue + } + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(h.mappings[h.groupVersion.String()]), + fieldpath.APIVersion(h.groupVersion.String()), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +func managedFieldsEntries(entries internal.ManagedInterface) ([]metav1.ManagedFieldsEntry, error) { + obj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := internal.EncodeObjectManagedFields(obj, entries); err != nil { + return nil, err + } + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + return accessor.GetManagedFields(), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go new file mode 100644 index 00000000..d031eefa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. +type TypeConverter = internal.TypeConverter + +// NewDeducedTypeConverter creates a TypeConverter for CRDs that don't +// have a schema. It does implement the same interface though (and +// create the same types of objects), so that everything can still work +// the same. CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return internal.NewDeducedTypeConverter() +} + +// NewTypeConverter builds a TypeConverter from a map of OpenAPIV3 schemas. +// This will automatically find the proper version of the object, and the +// corresponding schema information. +// The keys to the map must be consistent with the names +// used by Refs within the schemas. 
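As a rough, illustrative sketch of how the ScaleHandler added above might be wired up (the apps/v1 group/version and the spec.replicas mapping below are assumptions for the example, not part of this patch), a caller builds the path mappings, constructs the handler from a parent object's managed fields, and asks for the scale-subresource view:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/managedfields"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// scaleEntriesFor converts the parent's managed-field entries into entries
// suitable for the autoscaling/v1 Scale subresource.
func scaleEntriesFor(parent []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) {
	gv := schema.GroupVersion{Group: "apps", Version: "v1"} // assumed parent group/version
	// Hypothetical mapping: apps/v1 resources keep replicas at .spec.replicas.
	mappings := managedfields.ResourcePathMappings{
		gv.String(): fieldpath.MakePathOrDie("spec", "replicas"),
	}
	h := managedfields.NewScaleHandler(parent, gv, mappings)
	// Keep only entries that own the replicas field, rewritten for autoscaling/v1.
	return h.ToSubresource()
}

func main() {
	entries, err := scaleEntriesFor(nil) // nil parent yields no entries
	fmt.Println(entries, err)
}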
+// The schemas should conform to the Kubernetes Structural Schema OpenAPI +// restrictions found in docs: +// https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema +func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) { + return internal.NewTypeConverter(openapiSpec, preserveUnknownFields) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index e3962756..a20efd18 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -88,8 +88,7 @@ func toYAML(v interface{}) (string, error) { // supports JSON merge patch semantics. // // NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. -// -// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). func HasConflicts(left, right interface{}) (bool, error) { switch typedLeft := left.(type) { case map[string]interface{}: diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index d738725c..3674914f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -126,14 +126,17 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. func (r *rudimentaryErrorBackoff) OnError(error) { + now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() - defer r.lastErrorTimeLock.Unlock() - d := time.Since(r.lastErrorTime) - if d < r.minPeriod { - // If the time moves backwards for any reason, do nothing - time.Sleep(r.minPeriod - d) - } + d := now.Sub(r.lastErrorTime) r.lastErrorTime = time.Now() + r.lastErrorTimeLock.Unlock() + + // Do not sleep with the lock held because that causes all callers of HandleError to block. + // We only want the current goroutine to block. + // A negative or zero duration causes time.Sleep to return immediately. + // If the time moves backwards for any reason, do nothing. + time.Sleep(r.minPeriod - d) } // GetCaller returns the caller of the function that calls it. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go index 99c292fe..d50526f4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go @@ -64,6 +64,20 @@ func (s Set[T]) Delete(items ...T) Set[T] { return s } +// Clear empties the set. +// It is preferable to replace the set with a newly constructed set, +// but not all callers can do that (when there are other references to the map). +// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN +// can't be cleared because NaN can't be removed. +// For sets containing items of a type that is reflexive for ==, +// this is optimized to a single call to runtime.mapclear(). +func (s Set[T]) Clear() Set[T] { + for key := range s { + delete(s, key) + } + return s +} + // Has returns true if and only if item is contained in the set. 
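The Clear method added to the generic sets package above empties a set in place, which matters when other references share the same underlying map. A minimal sketch of that behavior (values are arbitrary):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	s := sets.New[string]("alpha", "beta")
	shared := s // both variables reference the same underlying map

	s.Clear() // empties the set in place instead of replacing it

	fmt.Println(shared.Has("alpha")) // false: the shared reference sees the cleared map
	fmt.Println(shared.Len())        // 0
}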
func (s Set[T]) Has(item T) bool { _, contained := s[item] diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS index 4443bafd..73244449 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -1,6 +1,7 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: + - apelisse - pwittrock reviewers: - apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 6fb36973..3ee683b9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1106,7 +1106,7 @@ func applyRetainKeysDirective(original, patch map[string]interface{}, options Me // Then, sort them by the relative order in setElementOrder, patch list and live list. // The precedence is $setElementOrder > order in patch list > order in live list. // This function will delete the item after merging it to prevent process it again in the future. -// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md +// Ref: https://git.k8s.io/design-proposals-archive/cli/preserve-order-in-strategic-merge-patch.md func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { for key, patchV := range patch { // Do nothing if there is no ordering directive diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index e767092d..0b8a6cb3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -191,7 +191,13 @@ func IsDNS1123Label(value string) []string { errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) } if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + if dns1123SubdomainRegexp.MatchString(value) { + // It was a valid subdomain and not a valid label. Since we + // already checked length, it must be dots. + errs = append(errs, "must not contain dots") + } else { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } } return errs } diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go new file mode 100644 index 00000000..41876192 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go @@ -0,0 +1,502 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "math" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/clock" +) + +// Backoff holds parameters applied to a Backoff function. +type Backoff struct { + // The initial duration. 
+ Duration time.Duration + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. + Factor float64 + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. + Jitter float64 + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. + Steps int + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. + Cap time.Duration +} + +// Step returns an amount of time to sleep determined by the original +// Duration and Jitter. The backoff is mutated to update its Steps and +// Duration. A nil Backoff always has a zero-duration step. +func (b *Backoff) Step() time.Duration { + if b == nil { + return 0 + } + var nextDuration time.Duration + nextDuration, b.Duration, b.Steps = delay(b.Steps, b.Duration, b.Cap, b.Factor, b.Jitter) + return nextDuration +} + +// DelayFunc returns a function that will compute the next interval to +// wait given the arguments in b. It does not mutate the original backoff +// but the function is safe to use only from a single goroutine. +func (b Backoff) DelayFunc() DelayFunc { + steps := b.Steps + duration := b.Duration + cap := b.Cap + factor := b.Factor + jitter := b.Jitter + + return func() time.Duration { + var nextDuration time.Duration + // jitter is applied per step and is not cumulative over multiple steps + nextDuration, duration, steps = delay(steps, duration, cap, factor, jitter) + return nextDuration + } +} + +// Timer returns a timer implementation appropriate to this backoff's parameters +// for use with wait functions. +func (b Backoff) Timer() Timer { + if b.Steps > 1 || b.Jitter != 0 { + return &variableTimer{new: internalClock.NewTimer, fn: b.DelayFunc()} + } + if b.Duration > 0 { + return &fixedTimer{new: internalClock.NewTicker, interval: b.Duration} + } + return newNoopTimer() +} + +// delay implements the core delay algorithm used in this package. +func delay(steps int, duration, cap time.Duration, factor, jitter float64) (_ time.Duration, next time.Duration, nextSteps int) { + // when steps is non-positive, do not alter the base duration + if steps < 1 { + if jitter > 0 { + return Jitter(duration, jitter), duration, 0 + } + return duration, duration, 0 + } + steps-- + + // calculate the next step's interval + if factor != 0 { + next = time.Duration(float64(duration) * factor) + if cap > 0 && next > cap { + next = cap + steps = 0 + } + } else { + next = duration + } + + // add jitter for this step + if jitter > 0 { + duration = Jitter(duration, jitter) + } + + return duration, next, steps + +} + +// DelayWithReset returns a DelayFunc that will return the appropriate next interval to +// wait. Every resetInterval the backoff parameters are reset to their initial state. +// This method is safe to invoke from multiple goroutines, but all calls will advance +// the backoff state when Factor is set. 
If Factor is zero, this method is the same as +// invoking b.DelayFunc() since Steps has no impact without Factor. If resetInterval is +// zero no backoff will be performed as the same calling DelayFunc with a zero factor +// and steps. +func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) DelayFunc { + if b.Factor <= 0 { + return b.DelayFunc() + } + if resetInterval <= 0 { + b.Steps = 0 + b.Factor = 0 + return b.DelayFunc() + } + return (&backoffManager{ + backoff: b, + initialBackoff: b, + resetInterval: resetInterval, + + clock: c, + lastStart: c.Now(), + timer: nil, + }).Step +} + +// Until loops until stop channel is closed, running f every period. +// +// Until is syntactic sugar on top of JitterUntil with zero jitter factor and +// with sliding = true (which means the timer for period starts after the f +// completes). +func Until(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, true, stopCh) +} + +// UntilWithContext loops until context is done, running f every period. +// +// UntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor and with sliding = true (which means the timer +// for period starts after the f completes). +func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, true) +} + +// NonSlidingUntil loops until stop channel is closed, running f every +// period. +// +// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter +// factor, with sliding = false (meaning the timer for period starts at the same +// time as the function starts). +func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, false, stopCh) +} + +// NonSlidingUntilWithContext loops until context is done, running f every +// period. +// +// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor, with sliding = false (meaning the timer for period +// starts at the same time as the function starts). +func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, false) +} + +// JitterUntil loops until stop channel is closed, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Close stopCh to stop. f may not be invoked if stop channel is already +// closed. Pass NeverStop to if you don't want it stop. +func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { + BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) +} + +// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. 
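As a rough illustration of how the Backoff parameters documented above combine, successive calls to Step() grow the duration by Factor until Cap is hit or Steps are exhausted. The concrete values below are arbitrary and Jitter is left at zero so the output is deterministic:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	b := wait.Backoff{
		Duration: 100 * time.Millisecond, // first delay
		Factor:   2.0,                    // double each step
		Jitter:   0,                      // no randomization, keeps output deterministic
		Steps:    4,                      // the duration may change at most this many times
		Cap:      500 * time.Millisecond, // never grow beyond this
	}
	for i := 0; i < 6; i++ {
		// Prints 100ms, 200ms, 400ms, 500ms, 500ms, 500ms.
		fmt.Println(b.Step())
	}
}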
+func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { + var t clock.Timer + for { + select { + case <-stopCh: + return + default: + } + + if !sliding { + t = backoff.Backoff() + } + + func() { + defer runtime.HandleCrash() + f() + }() + + if sliding { + t = backoff.Backoff() + } + + // NOTE: b/c there is no priority selection in golang + // it is possible for this to race, meaning we could + // trigger t.C and stopCh, and t.C select falls through. + // In order to mitigate we re-check stopCh at the beginning + // of every loop to prevent extra executions of f(). + select { + case <-stopCh: + if !t.Stop() { + <-t.C() + } + return + case <-t.C(): + } + } +} + +// JitterUntilWithContext loops until context is done, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Cancel context to stop. f may not be invoked if context is already expired. +func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { + JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) +} + +// backoffManager provides simple backoff behavior in a threadsafe manner to a caller. +type backoffManager struct { + backoff Backoff + initialBackoff Backoff + resetInterval time.Duration + + clock clock.Clock + + lock sync.Mutex + lastStart time.Time + timer clock.Timer +} + +// Step returns the expected next duration to wait. +func (b *backoffManager) Step() time.Duration { + b.lock.Lock() + defer b.lock.Unlock() + + switch { + case b.resetInterval == 0: + b.backoff = b.initialBackoff + case b.clock.Now().Sub(b.lastStart) > b.resetInterval: + b.backoff = b.initialBackoff + b.lastStart = b.clock.Now() + } + return b.backoff.Step() +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer +// for exponential backoff. The returned timer must be drained before calling Backoff() the second +// time. +func (b *backoffManager) Backoff() clock.Timer { + b.lock.Lock() + defer b.lock.Unlock() + if b.timer == nil { + b.timer = b.clock.NewTimer(b.Step()) + } else { + b.timer.Reset(b.Step()) + } + return b.timer +} + +// Timer returns a new Timer instance that shares the clock and the reset behavior with all other +// timers. +func (b *backoffManager) Timer() Timer { + return DelayFunc(b.Step).Timer(b.clock) +} + +// BackoffManager manages backoff with a particular scheme based on its underlying implementation. +type BackoffManager interface { + // Backoff returns a shared clock.Timer that is Reset on every invocation. This method is not + // safe for use from multiple threads. It returns a timer for backoff, and caller shall backoff + // until Timer.C() drains. If the second Backoff() is called before the timer from the first + // Backoff() call finishes, the first timer will NOT be drained and result in undetermined + // behavior. + Backoff() clock.Timer +} + +// Deprecated: Will be removed when the legacy polling functions are removed. 
+type exponentialBackoffManagerImpl struct { + backoff *Backoff + backoffTimer clock.Timer + lastBackoffStart time.Time + initialBackoff time.Duration + backoffResetDuration time.Duration + clock clock.Clock +} + +// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and +// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. +// This backoff manager is used to reduce load during upstream unhealthiness. +// +// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a +// Backoff struct, use DelayWithReset() to get a DelayFunc that periodically resets itself, and then +// invoke Timer() when calling wait.BackoffUntil. +// +// Instead of: +// +// bm := wait.NewExponentialBackoffManager(init, max, reset, factor, jitter, clock) +// ... +// wait.BackoffUntil(..., bm.Backoff, ...) +// +// Use: +// +// delayFn := wait.Backoff{ +// Duration: init, +// Cap: max, +// Steps: int(math.Ceil(float64(max) / float64(init))), // now a required argument +// Factor: factor, +// Jitter: jitter, +// }.DelayWithReset(reset, clock) +// wait.BackoffUntil(..., delayFn.Timer(), ...) +func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { + return &exponentialBackoffManagerImpl{ + backoff: &Backoff{ + Duration: initBackoff, + Factor: backoffFactor, + Jitter: jitter, + + // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not + // what we ideally need here, we set it to max int and assume we will never use up the steps + Steps: math.MaxInt32, + Cap: maxBackoff, + }, + backoffTimer: nil, + initialBackoff: initBackoff, + lastBackoffStart: c.Now(), + backoffResetDuration: resetDuration, + clock: c, + } +} + +func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { + if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { + b.backoff.Steps = math.MaxInt32 + b.backoff.Duration = b.initialBackoff + } + b.lastBackoffStart = b.clock.Now() + return b.backoff.Step() +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. +// The returned timer must be drained before calling Backoff() the second time +func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { + if b.backoffTimer == nil { + b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) + } else { + b.backoffTimer.Reset(b.getNextBackoff()) + } + return b.backoffTimer +} + +// Deprecated: Will be removed when the legacy polling functions are removed. +type jitteredBackoffManagerImpl struct { + clock clock.Clock + duration time.Duration + jitter float64 + backoffTimer clock.Timer +} + +// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter +// is negative, backoff will not be jittered. +// +// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a +// Backoff struct and invoke Timer() when calling wait.BackoffUntil. +// +// Instead of: +// +// bm := wait.NewJitteredBackoffManager(duration, jitter, clock) +// ... +// wait.BackoffUntil(..., bm.Backoff, ...) +// +// Use: +// +// wait.BackoffUntil(..., wait.Backoff{Duration: duration, Jitter: jitter}.Timer(), ...) 
+func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { + return &jitteredBackoffManagerImpl{ + clock: c, + duration: duration, + jitter: jitter, + backoffTimer: nil, + } +} + +func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { + jitteredPeriod := j.duration + if j.jitter > 0.0 { + jitteredPeriod = Jitter(j.duration, j.jitter) + } + return jitteredPeriod +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. +// The returned timer must be drained before calling Backoff() the second time +func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { + backoff := j.getNextBackoff() + if j.backoffTimer == nil { + j.backoffTimer = j.clock.NewTimer(backoff) + } else { + j.backoffTimer.Reset(backoff) + } + return j.backoffTimer +} + +// ExponentialBackoff repeats a condition check with exponential backoff. +// +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. +// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. +// +// Since backoffs are often subject to cancellation, we recommend using +// ExponentialBackoffWithContext and passing a context to the method. +func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { + for backoff.Steps > 0 { + if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { + return err + } + if backoff.Steps == 1 { + break + } + time.Sleep(backoff.Step()) + } + return ErrWaitTimeout +} + +// ExponentialBackoffWithContext repeats a condition check with exponential backoff. +// It immediately returns an error if the condition returns an error, the context is cancelled +// or hits the deadline, or if the maximum attempts defined in backoff is exceeded (ErrWaitTimeout). +// If an error is returned by the condition the backoff stops immediately. The condition will +// never be invoked more than backoff.Steps times. +func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error { + for backoff.Steps > 0 { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if ok, err := runConditionWithCrashProtectionWithContext(ctx, condition); err != nil || ok { + return err + } + + if backoff.Steps == 1 { + break + } + + waitBeforeRetry := backoff.Step() + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(waitBeforeRetry): + } + } + + return ErrWaitTimeout +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go b/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go new file mode 100644 index 00000000..1d3dcaa7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go @@ -0,0 +1,51 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "sync" + "time" + + "k8s.io/utils/clock" +) + +// DelayFunc returns the next time interval to wait. +type DelayFunc func() time.Duration + +// Timer takes an arbitrary delay function and returns a timer that can handle arbitrary interval changes. +// Use Backoff{...}.Timer() for simple delays and more efficient timers. +func (fn DelayFunc) Timer(c clock.Clock) Timer { + return &variableTimer{fn: fn, new: c.NewTimer} +} + +// Until takes an arbitrary delay function and runs until cancelled or the condition indicates exit. This +// offers all of the functionality of the methods in this package. +func (fn DelayFunc) Until(ctx context.Context, immediate, sliding bool, condition ConditionWithContextFunc) error { + return loopConditionUntilContext(ctx, &variableTimer{fn: fn, new: internalClock.NewTimer}, immediate, sliding, condition) +} + +// Concurrent returns a version of this DelayFunc that is safe for use by multiple goroutines that +// wish to share a single delay timer. +func (fn DelayFunc) Concurrent() DelayFunc { + var lock sync.Mutex + return func() time.Duration { + lock.Lock() + defer lock.Unlock() + return fn() + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/error.go b/vendor/k8s.io/apimachinery/pkg/util/wait/error.go new file mode 100644 index 00000000..dd75801d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/error.go @@ -0,0 +1,96 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "errors" +) + +// ErrWaitTimeout is returned when the condition was not satisfied in time. +// +// Deprecated: This type will be made private in favor of Interrupted() +// for checking errors or ErrorInterrupted(err) for returning a wrapped error. +var ErrWaitTimeout = ErrorInterrupted(errors.New("timed out waiting for the condition")) + +// Interrupted returns true if the error indicates a Poll, ExponentialBackoff, or +// Until loop exited for any reason besides the condition returning true or an +// error. A loop is considered interrupted if the calling context is cancelled, +// the context reaches its deadline, or a backoff reaches its maximum allowed +// steps. +// +// Callers should use this method instead of comparing the error value directly to +// ErrWaitTimeout, as methods that cancel a context may not return that error. +// +// Instead of: +// +// err := wait.Poll(...) +// if err == wait.ErrWaitTimeout { +// log.Infof("Wait for operation exceeded") +// } else ... +// +// Use: +// +// err := wait.Poll(...) 
+// if wait.Interrupted(err) { +// log.Infof("Wait for operation exceeded") +// } else ... +func Interrupted(err error) bool { + switch { + case errors.Is(err, errWaitTimeout), + errors.Is(err, context.Canceled), + errors.Is(err, context.DeadlineExceeded): + return true + default: + return false + } +} + +// errInterrupted +type errInterrupted struct { + cause error +} + +// ErrorInterrupted returns an error that indicates the wait was ended +// early for a given reason. If no cause is provided a generic error +// will be used but callers are encouraged to provide a real cause for +// clarity in debugging. +func ErrorInterrupted(cause error) error { + switch cause.(type) { + case errInterrupted: + // no need to wrap twice since errInterrupted is only needed + // once in a chain + return cause + default: + return errInterrupted{cause} + } +} + +// errWaitTimeout is the private version of the previous ErrWaitTimeout +// and is private to prevent direct comparison. Use ErrorInterrupted(err) +// to get an error that will return true for Interrupted(err). +var errWaitTimeout = errInterrupted{} + +func (e errInterrupted) Unwrap() error { return e.cause } +func (e errInterrupted) Is(target error) bool { return target == errWaitTimeout } +func (e errInterrupted) Error() string { + if e.cause == nil { + // returns the same error message as historical behavior + return "timed out waiting for the condition" + } + return e.cause.Error() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go new file mode 100644 index 00000000..0dd13c62 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -0,0 +1,97 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// loopConditionUntilContext executes the provided condition at intervals defined by +// the provided timer until the provided context is cancelled, the condition returns +// true, or the condition returns an error. If sliding is true, the period is computed +// after condition runs. If it is false then period includes the runtime for condition. +// If immediate is false the first delay happens before any call to condition, if +// immediate is true the condition will be invoked before waiting and guarantees that +// the condition is invoked at least once, regardless of whether the context has been +// cancelled. The returned error is the error returned by the last condition or the +// context error if the context was terminated. +// +// This is the common loop construct for all polling in the wait package. 
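Putting the pieces above together, a caller can retry a condition with ExponentialBackoffWithContext and use wait.Interrupted to tell "ran out of attempts or cancelled" apart from a real condition error. This is only a sketch; the condition below is a stand-in and the backoff values are arbitrary:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{Duration: 50 * time.Millisecond, Factor: 2, Steps: 5}

	attempts := 0
	err := wait.ExponentialBackoffWithContext(context.Background(), backoff,
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil // pretend the operation succeeds on the third try
		})

	switch {
	case err == nil:
		fmt.Println("succeeded after", attempts, "attempts")
	case wait.Interrupted(err):
		fmt.Println("gave up: context cancelled/deadline hit or steps exhausted")
	default:
		fmt.Println("condition failed:", err)
	}
}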
+func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding bool, condition ConditionWithContextFunc) error { + defer t.Stop() + + var timeCh <-chan time.Time + doneCh := ctx.Done() + + // if immediate is true the condition is + // guaranteed to be executed at least once, + // if we haven't requested immediate execution, delay once + if immediate { + if ok, err := func() (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) + }(); err != nil || ok { + return err + } + } else { + timeCh = t.C() + select { + case <-doneCh: + return ctx.Err() + case <-timeCh: + } + } + + for { + // checking ctx.Err() is slightly faster than checking a select + if err := ctx.Err(); err != nil { + return err + } + + if !sliding { + t.Next() + } + if ok, err := func() (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) + }(); err != nil || ok { + return err + } + if sliding { + t.Next() + } + + if timeCh == nil { + timeCh = t.C() + } + + // NOTE: b/c there is no priority selection in golang + // it is possible for this to race, meaning we could + // trigger t.C and doneCh, and t.C select falls through. + // In order to mitigate we re-check doneCh at the beginning + // of every loop to guarantee at-most one extra execution + // of condition. + select { + case <-doneCh: + return ctx.Err() + case <-timeCh: + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go new file mode 100644 index 00000000..32e8688c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go @@ -0,0 +1,315 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "time" +) + +// PollUntilContextCancel tries a condition func until it returns true, an error, or the context +// is cancelled or hits a deadline. condition will be invoked after the first interval if the +// context is not cancelled first. The returned error will be from ctx.Err(), the condition's +// err return value, or nil. If invoking condition takes longer than interval the next condition +// will be invoked immediately. When using very short intervals, condition may be invoked multiple +// times before a context cancellation is detected. If immediate is true, condition will be +// invoked before waiting and guarantees that condition is invoked at least once, regardless of +// whether the context has been cancelled. +func PollUntilContextCancel(ctx context.Context, interval time.Duration, immediate bool, condition ConditionWithContextFunc) error { + return loopConditionUntilContext(ctx, Backoff{Duration: interval}.Timer(), immediate, false, condition) +} + +// PollUntilContextTimeout will terminate polling after timeout duration by setting a context +// timeout. 
This is provided as a convenience function for callers not currently executing under +// a deadline and is equivalent to: +// +// deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) +// err := PollUntilContextCancel(ctx, interval, immediate, condition) +// +// The deadline context will be cancelled if the Poll succeeds before the timeout, simplifying +// inline usage. All other behavior is identical to PollWithContextTimeout. +func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duration, immediate bool, condition ConditionWithContextFunc) error { + deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) + defer deadlineCancel() + return loopConditionUntilContext(deadlineCtx, Backoff{Duration: interval}.Timer(), immediate, false, condition) +} + +// Poll tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// Poll always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +// +// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func Poll(interval, timeout time.Duration, condition ConditionFunc) error { + return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollWithContext tries a condition func until it returns true, an error, +// or when the context expires or the timeout is reached, whichever +// happens first. +// +// PollWithContext always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +// +// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, timeout), condition) +} + +// PollUntil tries a condition func until it returns true, an error or stopCh is +// closed. +// +// PollUntil always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return PollUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) +} + +// PollUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollUntilWithContext always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. 
+// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) +} + +// PollInfinite tries a condition func until it returns true or an error +// +// PollInfinite always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollInfinite(interval time.Duration, condition ConditionFunc) error { + return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollInfiniteWithContext tries a condition func until it returns true or an error +// +// PollInfiniteWithContext always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) +} + +// PollImmediate tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// PollImmediate always checks 'condition' before waiting for the interval. 'condition' +// will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +// +// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { + return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollImmediateWithContext tries a condition func until it returns true, an error, +// or the timeout is reached or the specified context expires, whichever happens first. +// +// PollImmediateWithContext always checks 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +// +// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. 
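The deprecation notes above steer callers toward the context-based helpers. A hedged sketch of the replacement for a typical PollImmediate call (interval and timeout values are arbitrary; the condition is a stand-in):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Before: wait.PollImmediate(time.Second, 30*time.Second, cond)
	// After: the same loop bounded by a context timeout; immediate=true keeps the
	// "check before the first sleep" behavior of PollImmediate.
	err := wait.PollUntilContextTimeout(context.Background(), time.Second, 30*time.Second, true,
		func(ctx context.Context) (bool, error) {
			// Stand-in condition: report success immediately.
			return true, nil
		})
	if err != nil {
		fmt.Println("polling failed:", err)
	}
}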
+func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, timeout), condition) +} + +// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. +// +// PollImmediateUntil runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return PollImmediateUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) +} + +// PollImmediateUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// PollImmediateInfinite tries a condition func until it returns true or an error +// +// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { + return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollImmediateInfiniteWithContext tries a condition func until it returns true +// or an error or the specified context gets cancelled or expired. +// +// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// Internally used, each of the public 'Poll*' function defined in this +// package should invoke this internal function with appropriate parameters. +// ctx: the context specified by the caller, for infinite polling pass +// a context that never gets cancelled or expired. 
+// immediate: if true, the 'condition' will be invoked before waiting for the interval, +// in this case 'condition' will always be invoked at least once. +// wait: user specified WaitFunc function that controls at what interval the condition +// function should be invoked periodically and whether it is bound by a timeout. +// condition: user specified ConditionWithContextFunc function. +// +// Deprecated: will be removed in favor of loopConditionUntilContext. +func poll(ctx context.Context, immediate bool, wait waitWithContextFunc, condition ConditionWithContextFunc) error { + if immediate { + done, err := runConditionWithCrashProtectionWithContext(ctx, condition) + if err != nil { + return err + } + if done { + return nil + } + } + + select { + case <-ctx.Done(): + // returning ctx.Err() will break backward compatibility, use new PollUntilContext* + // methods instead + return ErrWaitTimeout + default: + return waitForWithContext(ctx, wait, condition) + } +} + +// poller returns a WaitFunc that will send to the channel every interval until +// timeout has elapsed and then closes the channel. +// +// Over very short intervals you may receive no ticks before the channel is +// closed. A timeout of 0 is interpreted as an infinity, and in such a case +// it would be the caller's responsibility to close the done channel. +// Failure to do so would result in a leaked goroutine. +// +// Output ticks are not buffered. If the channel is not ready to receive an +// item, the tick is skipped. +// +// Deprecated: Will be removed in a future release. +func poller(interval, timeout time.Duration) waitWithContextFunc { + return waitWithContextFunc(func(ctx context.Context) <-chan struct{} { + ch := make(chan struct{}) + + go func() { + defer close(ch) + + tick := time.NewTicker(interval) + defer tick.Stop() + + var after <-chan time.Time + if timeout != 0 { + // time.After is more convenient, but it + // potentially leaves timers around much longer + // than necessary if we exit early. + timer := time.NewTimer(timeout) + after = timer.C + defer timer.Stop() + } + + for { + select { + case <-tick.C: + // If the consumer isn't ready for this signal drop it and + // check the other channels. + select { + case ch <- struct{}{}: + default: + } + case <-after: + return + case <-ctx.Done(): + return + } + } + }() + + return ch + }) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go b/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go new file mode 100644 index 00000000..3efba321 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go @@ -0,0 +1,121 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "time" + + "k8s.io/utils/clock" +) + +// Timer abstracts how wait functions interact with time runtime efficiently. Test +// code may implement this interface directly but package consumers are encouraged +// to use the Backoff type as the primary mechanism for acquiring a Timer. 
The +// interface is a simplification of clock.Timer to prevent misuse. Timers are not +// expected to be safe for calls from multiple goroutines. +type Timer interface { + // C returns a channel that will receive a struct{} each time the timer fires. + // The channel should not be waited on after Stop() is invoked. It is allowed + // to cache the returned value of C() for the lifetime of the Timer. + C() <-chan time.Time + // Next is invoked by wait functions to signal timers that the next interval + // should begin. You may only use Next() if you have drained the channel C(). + // You should not call Next() after Stop() is invoked. + Next() + // Stop releases the timer. It is safe to invoke if no other methods have been + // called. + Stop() +} + +type noopTimer struct { + closedCh <-chan time.Time +} + +// newNoopTimer creates a timer with a unique channel to avoid contention +// for the channel's lock across multiple unrelated timers. +func newNoopTimer() noopTimer { + ch := make(chan time.Time) + close(ch) + return noopTimer{closedCh: ch} +} + +func (t noopTimer) C() <-chan time.Time { + return t.closedCh +} +func (noopTimer) Next() {} +func (noopTimer) Stop() {} + +type variableTimer struct { + fn DelayFunc + t clock.Timer + new func(time.Duration) clock.Timer +} + +func (t *variableTimer) C() <-chan time.Time { + if t.t == nil { + d := t.fn() + t.t = t.new(d) + } + return t.t.C() +} +func (t *variableTimer) Next() { + if t.t == nil { + return + } + d := t.fn() + t.t.Reset(d) +} +func (t *variableTimer) Stop() { + if t.t == nil { + return + } + t.t.Stop() + t.t = nil +} + +type fixedTimer struct { + interval time.Duration + t clock.Ticker + new func(time.Duration) clock.Ticker +} + +func (t *fixedTimer) C() <-chan time.Time { + if t.t == nil { + t.t = t.new(t.interval) + } + return t.t.C() +} +func (t *fixedTimer) Next() { + // no-op for fixed timers +} +func (t *fixedTimer) Stop() { + if t.t == nil { + return + } + t.t.Stop() + t.t = nil +} + +var ( + // RealTimer can be passed to methods that need a clock.Timer. + RealTimer = clock.RealClock{}.NewTimer +) + +var ( + // internalClock is used for test injection of clocks + internalClock = clock.RealClock{} +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 137627b4..6805e8cf 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -18,14 +18,11 @@ package wait import ( "context" - "errors" - "math" "math/rand" "sync" "time" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/utils/clock" ) // For any test of the style: @@ -83,113 +80,6 @@ func Forever(f func(), period time.Duration) { Until(f, period, NeverStop) } -// Until loops until stop channel is closed, running f every period. -// -// Until is syntactic sugar on top of JitterUntil with zero jitter factor and -// with sliding = true (which means the timer for period starts after the f -// completes). -func Until(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, true, stopCh) -} - -// UntilWithContext loops until context is done, running f every period. -// -// UntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor and with sliding = true (which means the timer -// for period starts after the f completes). 
-func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, true) -} - -// NonSlidingUntil loops until stop channel is closed, running f every -// period. -// -// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter -// factor, with sliding = false (meaning the timer for period starts at the same -// time as the function starts). -func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, false, stopCh) -} - -// NonSlidingUntilWithContext loops until context is done, running f every -// period. -// -// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor, with sliding = false (meaning the timer for period -// starts at the same time as the function starts). -func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, false) -} - -// JitterUntil loops until stop channel is closed, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Close stopCh to stop. f may not be invoked if stop channel is already -// closed. Pass NeverStop to if you don't want it stop. -func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { - BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) -} - -// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { - var t clock.Timer - for { - select { - case <-stopCh: - return - default: - } - - if !sliding { - t = backoff.Backoff() - } - - func() { - defer runtime.HandleCrash() - f() - }() - - if sliding { - t = backoff.Backoff() - } - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger t.C and stopCh, and t.C select falls through. - // In order to mitigate we re-check stopCh at the beginning - // of every loop to prevent extra executions of f(). - select { - case <-stopCh: - if !t.Stop() { - <-t.C() - } - return - case <-t.C(): - } - } -} - -// JitterUntilWithContext loops until context is done, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Cancel context to stop. f may not be invoked if context is already expired. -func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { - JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) -} - // Jitter returns a time.Duration between duration and duration + maxFactor * // duration. 
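Editor's note: the Until/JitterUntil/BackoffUntil family is deleted from wait.go in this hunk, yet the retained Forever still calls Until, which indicates the helpers were relocated within the package rather than removed. Assuming wait.Until keeps the signature shown in the deleted lines, a minimal usage sketch of its sliding-period behaviour:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})

	// Stop the loop after roughly half a second.
	go func() {
		time.Sleep(500 * time.Millisecond)
		close(stopCh)
	}()

	// Until is JitterUntil with zero jitter and sliding=true: the 100ms
	// period is measured from the moment f returns, not from when it starts.
	wait.Until(func() {
		fmt.Println("pass at", time.Now().Format(time.StampMilli))
	}, 100*time.Millisecond, stopCh)
}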
// @@ -203,9 +93,6 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration { return wait } -// ErrWaitTimeout is returned when the condition exited without success. -var ErrWaitTimeout = errors.New("timed out waiting for the condition") - // ConditionFunc returns true if the condition is satisfied, or an error // if the loop should be aborted. type ConditionFunc func() (done bool, err error) @@ -223,425 +110,80 @@ func (cf ConditionFunc) WithContext() ConditionWithContextFunc { } } -// runConditionWithCrashProtection runs a ConditionFunc with crash protection -func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { - return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext()) -} - -// runConditionWithCrashProtectionWithContext runs a -// ConditionWithContextFunc with crash protection. -func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { - defer runtime.HandleCrash() - return condition(ctx) -} - -// Backoff holds parameters applied to a Backoff function. -type Backoff struct { - // The initial duration. - Duration time.Duration - // Duration is multiplied by factor each iteration, if factor is not zero - // and the limits imposed by Steps and Cap have not been reached. - // Should not be negative. - // The jitter does not contribute to the updates to the duration parameter. - Factor float64 - // The sleep at each iteration is the duration plus an additional - // amount chosen uniformly at random from the interval between - // zero and `jitter*duration`. - Jitter float64 - // The remaining number of iterations in which the duration - // parameter may change (but progress can be stopped earlier by - // hitting the cap). If not positive, the duration is not - // changed. Used for exponential backoff in combination with - // Factor and Cap. - Steps int - // A limit on revised values of the duration parameter. If a - // multiplication by the factor parameter would make the duration - // exceed the cap then the duration is set to the cap and the - // steps parameter is set to zero. - Cap time.Duration -} - -// Step (1) returns an amount of time to sleep determined by the -// original Duration and Jitter and (2) mutates the provided Backoff -// to update its Steps and Duration. -func (b *Backoff) Step() time.Duration { - if b.Steps < 1 { - if b.Jitter > 0 { - return Jitter(b.Duration, b.Jitter) - } - return b.Duration - } - b.Steps-- - - duration := b.Duration - - // calculate the next step - if b.Factor != 0 { - b.Duration = time.Duration(float64(b.Duration) * b.Factor) - if b.Cap > 0 && b.Duration > b.Cap { - b.Duration = b.Cap - b.Steps = 0 - } - } - - if b.Jitter > 0 { - duration = Jitter(duration, b.Jitter) - } - return duration -} - -// ContextForChannel derives a child context from a parent channel. -// -// The derived context's Done channel is closed when the returned cancel function -// is called or when the parent channel is closed, whichever happens first. -// -// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. -func ContextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - select { - case <-parentCh: - cancel() - case <-ctx.Done(): - } - }() - return ctx, cancel -} - -// BackoffManager manages backoff with a particular scheme based on its underlying implementation. 
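Editor's note: Backoff.Step, removed from this file, returns the current duration and then advances Duration and Steps, clamping at Cap. With Duration=100ms, Factor=2, Steps=4, Cap=1s and no jitter, successive calls yield 100ms, 200ms, 400ms, 800ms, and every later call returns the capped 1s. The sketch below assumes the Backoff type stays exported elsewhere in the package, consistent with timer.go's note about acquiring a Timer from the Backoff type.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	b := wait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   2,
		Steps:    4,
		Cap:      1 * time.Second,
	}
	// Expected output: 100ms 200ms 400ms 800ms, then the capped 1s repeatedly.
	for i := 0; i < 6; i++ {
		fmt.Println(b.Step())
	}
}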
It provides -// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff() -// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in -// undetermined behavior. -// The BackoffManager is supposed to be called in a single-threaded environment. -type BackoffManager interface { - Backoff() clock.Timer -} - -type exponentialBackoffManagerImpl struct { - backoff *Backoff - backoffTimer clock.Timer - lastBackoffStart time.Time - initialBackoff time.Duration - backoffResetDuration time.Duration - clock clock.Clock -} - -// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and -// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. -// This backoff manager is used to reduce load during upstream unhealthiness. -func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { - return &exponentialBackoffManagerImpl{ - backoff: &Backoff{ - Duration: initBackoff, - Factor: backoffFactor, - Jitter: jitter, - - // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not - // what we ideally need here, we set it to max int and assume we will never use up the steps - Steps: math.MaxInt32, - Cap: maxBackoff, - }, - backoffTimer: nil, - initialBackoff: initBackoff, - lastBackoffStart: c.Now(), - backoffResetDuration: resetDuration, - clock: c, - } -} - -func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { - if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { - b.backoff.Steps = math.MaxInt32 - b.backoff.Duration = b.initialBackoff - } - b.lastBackoffStart = b.clock.Now() - return b.backoff.Step() -} - -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. -// The returned timer must be drained before calling Backoff() the second time -func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { - if b.backoffTimer == nil { - b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) - } else { - b.backoffTimer.Reset(b.getNextBackoff()) - } - return b.backoffTimer -} - -type jitteredBackoffManagerImpl struct { - clock clock.Clock - duration time.Duration - jitter float64 - backoffTimer clock.Timer -} - -// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter -// is negative, backoff will not be jittered. -func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { - return &jitteredBackoffManagerImpl{ - clock: c, - duration: duration, - jitter: jitter, - backoffTimer: nil, - } -} - -func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { - jitteredPeriod := j.duration - if j.jitter > 0.0 { - jitteredPeriod = Jitter(j.duration, j.jitter) - } - return jitteredPeriod +// ContextForChannel provides a context that will be treated as cancelled +// when the provided parentCh is closed. The implementation returns +// context.Canceled for Err() if and only if the parentCh is closed. +func ContextForChannel(parentCh <-chan struct{}) context.Context { + return channelContext{stopCh: parentCh} } -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. 
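Editor's note: NewExponentialBackoffManager, removed here, resets to the initial backoff whenever Backoff() has not been requested within resetDuration, as getNextBackoff shows. The sketch below pairs such a manager with BackoffUntil; it assumes both helpers remain available elsewhere in the package (upstream keeps deprecated equivalents) with the signatures visible in the deleted lines.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/utils/clock"
)

func main() {
	stopCh := make(chan struct{})
	go func() {
		time.Sleep(2 * time.Second)
		close(stopCh)
	}()

	// Start at 100ms, double (with 20% jitter) up to 1s, and fall back to
	// 100ms again if no backoff was requested for 5s.
	mgr := wait.NewExponentialBackoffManager(
		100*time.Millisecond, // initBackoff
		time.Second,          // maxBackoff
		5*time.Second,        // resetDuration
		2.0,                  // backoffFactor
		0.2,                  // jitter
		clock.RealClock{},    // clock from k8s.io/utils/clock
	)

	// sliding=true: the next delay is computed after f returns.
	wait.BackoffUntil(func() {
		fmt.Println("attempt at", time.Now().Format(time.StampMilli))
	}, mgr, true, stopCh)
}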
-// The returned timer must be drained before calling Backoff() the second time -func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { - backoff := j.getNextBackoff() - if j.backoffTimer == nil { - j.backoffTimer = j.clock.NewTimer(backoff) - } else { - j.backoffTimer.Reset(backoff) - } - return j.backoffTimer -} +var _ context.Context = channelContext{} -// ExponentialBackoff repeats a condition check with exponential backoff. -// -// It repeatedly checks the condition and then sleeps, using `backoff.Step()` -// to determine the length of the sleep and adjust Duration and Steps. -// Stops and returns as soon as: -// 1. the condition check returns true or an error, -// 2. `backoff.Steps` checks of the condition have been done, or -// 3. a sleep truncated by the cap on duration has been completed. -// In case (1) the returned error is what the condition function returned. -// In all other cases, ErrWaitTimeout is returned. -func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - if backoff.Steps == 1 { - break - } - time.Sleep(backoff.Step()) - } - return ErrWaitTimeout -} - -// Poll tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// Poll always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func Poll(interval, timeout time.Duration, condition ConditionFunc) error { - return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) -} - -// PollWithContext tries a condition func until it returns true, an error, -// or when the context expires or the timeout is reached, whichever -// happens first. -// -// PollWithContext always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, timeout), condition) -} - -// PollUntil tries a condition func until it returns true, an error or stopCh is +// channelContext will behave as if the context were cancelled when stopCh is // closed. -// -// PollUntil always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. -func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := ContextForChannel(stopCh) - defer cancel() - return PollUntilWithContext(ctx, interval, condition.WithContext()) -} - -// PollUntilWithContext tries a condition func until it returns true, -// an error or the specified context is cancelled or expired. -// -// PollUntilWithContext always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. 
-func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, 0), condition) -} - -// PollInfinite tries a condition func until it returns true or an error -// -// PollInfinite always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfinite(interval time.Duration, condition ConditionFunc) error { - return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) -} - -// PollInfiniteWithContext tries a condition func until it returns true or an error -// -// PollInfiniteWithContext always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, 0), condition) +type channelContext struct { + stopCh <-chan struct{} } -// PollImmediate tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// PollImmediate always checks 'condition' before waiting for the interval. 'condition' -// will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { - return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) -} - -// PollImmediateWithContext tries a condition func until it returns true, an error, -// or the timeout is reached or the specified context expires, whichever happens first. -// -// PollImmediateWithContext always checks 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, timeout), condition) -} - -// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. -// -// PollImmediateUntil runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := ContextForChannel(stopCh) - defer cancel() - return PollImmediateUntilWithContext(ctx, interval, condition.WithContext()) -} - -// PollImmediateUntilWithContext tries a condition func until it returns true, -// an error or the specified context is cancelled or expired. -// -// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. 
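Editor's note: the deleted Poll* helpers and the comments in the new poll above both point callers at the PollUntilContext* replacements, which take a ConditionWithContextFunc and surface cancellation through the context rather than ErrWaitTimeout. A migration sketch follows; the exact replacement signatures are taken from this apimachinery release and should be read as an assumption, since this hunk does not show them.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// Old: wait.PollImmediate(250*time.Millisecond, 5*time.Second, cond)
	// New: the immediate flag is an explicit argument and the timeout is
	// enforced through a derived context.
	err := wait.PollUntilContextTimeout(ctx, 250*time.Millisecond, 5*time.Second, true,
		func(ctx context.Context) (bool, error) {
			// Placeholder; replace with a real readiness check.
			return true, nil
		})
	if err != nil {
		// On timeout the error comes from the context, not wait.ErrWaitTimeout.
		fmt.Println("condition not met:", err)
	}

	// Old: wait.PollImmediateUntilWithContext(ctx, interval, cond)
	// New: PollUntilContextCancel polls until the condition succeeds or the
	// context is cancelled.
	cancellable, cancel := context.WithCancel(ctx)
	defer cancel()
	_ = wait.PollUntilContextCancel(cancellable, 250*time.Millisecond, true,
		func(ctx context.Context) (bool, error) { return true, nil })
}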
-func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, 0), condition) +func (c channelContext) Done() <-chan struct{} { return c.stopCh } +func (c channelContext) Err() error { + select { + case <-c.stopCh: + return context.Canceled + default: + return nil + } } +func (c channelContext) Deadline() (time.Time, bool) { return time.Time{}, false } +func (c channelContext) Value(key any) any { return nil } -// PollImmediateInfinite tries a condition func until it returns true or an error +// runConditionWithCrashProtection runs a ConditionFunc with crash protection. // -// PollImmediateInfinite runs the 'condition' before waiting for the interval. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { - return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +// Deprecated: Will be removed when the legacy polling methods are removed. +func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { + defer runtime.HandleCrash() + return condition() } -// PollImmediateInfiniteWithContext tries a condition func until it returns true -// or an error or the specified context gets cancelled or expired. +// runConditionWithCrashProtectionWithContext runs a ConditionWithContextFunc +// with crash protection. // -// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, 0), condition) -} - -// Internally used, each of the public 'Poll*' function defined in this -// package should invoke this internal function with appropriate parameters. -// ctx: the context specified by the caller, for infinite polling pass -// a context that never gets cancelled or expired. -// immediate: if true, the 'condition' will be invoked before waiting for the interval, -// in this case 'condition' will always be invoked at least once. -// wait: user specified WaitFunc function that controls at what interval the condition -// function should be invoked periodically and whether it is bound by a timeout. -// condition: user specified ConditionWithContextFunc function. -func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error { - if immediate { - done, err := runConditionWithCrashProtectionWithContext(ctx, condition) - if err != nil { - return err - } - if done { - return nil - } - } - - select { - case <-ctx.Done(): - // returning ctx.Err() will break backward compatibility - return ErrWaitTimeout - default: - return WaitForWithContext(ctx, wait, condition) - } +// Deprecated: Will be removed when the legacy polling methods are removed. +func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) } -// WaitFunc creates a channel that receives an item every time a test +// waitFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. 
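Editor's note: ContextForChannel now returns a plain channelContext value instead of a derived context plus CancelFunc, so callers no longer need a deferred cancel and no goroutine is spawned. A small usage sketch of the new shape:

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})

	// No CancelFunc any more: the returned context is just a view over stopCh.
	ctx := wait.ContextForChannel(stopCh)

	fmt.Println(ctx.Err()) // <nil> while stopCh is open

	close(stopCh)

	// After the channel closes, the context reports cancellation.
	fmt.Println(ctx.Err() == context.Canceled) // true
	<-ctx.Done()                               // does not block once stopCh is closed
}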
-type WaitFunc func(done <-chan struct{}) <-chan struct{} +// +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +type waitFunc func(done <-chan struct{}) <-chan struct{} // WithContext converts the WaitFunc to an equivalent WaitWithContextFunc -func (w WaitFunc) WithContext() WaitWithContextFunc { +func (w waitFunc) WithContext() waitWithContextFunc { return func(ctx context.Context) <-chan struct{} { return w(ctx.Done()) } } -// WaitWithContextFunc creates a channel that receives an item every time a test +// waitWithContextFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. // // When the specified context gets cancelled or expires the function // stops sending item and returns immediately. -type WaitWithContextFunc func(ctx context.Context) <-chan struct{} - -// WaitFor continually checks 'fn' as driven by 'wait'. // -// WaitFor gets a channel from 'wait()â€, and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. If the channel is closed -// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. -// -// If 'fn' returns an error the loop ends and that error is returned. If -// 'fn' returns true the loop ends and nil is returned. -// -// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever -// returning true. -// -// When the done channel is closed, because the golang `select` statement is -// "uniform pseudo-random", the `fn` might still run one or multiple time, -// though eventually `WaitFor` will return. -func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - ctx, cancel := ContextForChannel(done) - defer cancel() - return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext()) -} +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +type waitWithContextFunc func(ctx context.Context) <-chan struct{} -// WaitForWithContext continually checks 'fn' as driven by 'wait'. +// waitForWithContext continually checks 'fn' as driven by 'wait'. // -// WaitForWithContext gets a channel from 'wait()â€, and then invokes 'fn' +// waitForWithContext gets a channel from 'wait()â€, and then invokes 'fn' // once for every value placed on the channel and once more when the // channel is closed. If the channel is closed and 'fn' -// returns false without error, WaitForWithContext returns ErrWaitTimeout. +// returns false without error, waitForWithContext returns ErrWaitTimeout. // // If 'fn' returns an error the loop ends and that error is returned. If // 'fn' returns true the loop ends and nil is returned. @@ -651,8 +193,11 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { // // When the ctx.Done() channel is closed, because the golang `select` statement is // "uniform pseudo-random", the `fn` might still run one or multiple times, -// though eventually `WaitForWithContext` will return. -func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error { +// though eventually `waitForWithContext` will return. +// +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. 
+func waitForWithContext(ctx context.Context, wait waitWithContextFunc, fn ConditionWithContextFunc) error { waitCtx, cancel := context.WithCancel(context.Background()) defer cancel() c := wait(waitCtx) @@ -670,88 +215,9 @@ func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn Condit return ErrWaitTimeout } case <-ctx.Done(): - // returning ctx.Err() will break backward compatibility + // returning ctx.Err() will break backward compatibility, use new PollUntilContext* + // methods instead return ErrWaitTimeout } } } - -// poller returns a WaitFunc that will send to the channel every interval until -// timeout has elapsed and then closes the channel. -// -// Over very short intervals you may receive no ticks before the channel is -// closed. A timeout of 0 is interpreted as an infinity, and in such a case -// it would be the caller's responsibility to close the done channel. -// Failure to do so would result in a leaked goroutine. -// -// Output ticks are not buffered. If the channel is not ready to receive an -// item, the tick is skipped. -func poller(interval, timeout time.Duration) WaitWithContextFunc { - return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} { - ch := make(chan struct{}) - - go func() { - defer close(ch) - - tick := time.NewTicker(interval) - defer tick.Stop() - - var after <-chan time.Time - if timeout != 0 { - // time.After is more convenient, but it - // potentially leaves timers around much longer - // than necessary if we exit early. - timer := time.NewTimer(timeout) - after = timer.C - defer timer.Stop() - } - - for { - select { - case <-tick.C: - // If the consumer isn't ready for this signal drop it and - // check the other channels. - select { - case ch <- struct{}{}: - default: - } - case <-after: - return - case <-ctx.Done(): - return - } - } - }() - - return ch - }) -} - -// ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never -// exceeds the deadline specified by the request context. -func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - - if backoff.Steps == 1 { - break - } - - waitBeforeRetry := backoff.Step() - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(waitBeforeRetry): - } - } - - return ErrWaitTimeout -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go new file mode 100644 index 00000000..ea1dc377 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go index eba37baf..faff51a0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go @@ -37,6 +37,7 @@ type MutatingWebhookApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with @@ -139,3 +140,16 @@ func (b *MutatingWebhookApplyConfiguration) WithReinvocationPolicy(value admissi b.ReinvocationPolicy = &value return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
+func (b *MutatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go index d0691de1..613856ba 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go @@ -36,6 +36,7 @@ type ValidatingWebhookApplyConfiguration struct { SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with @@ -130,3 +131,16 @@ func (b *ValidatingWebhookApplyConfiguration) WithAdmissionReviewVersions(values } return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go deleted file mode 100644 index 4936110f..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" -) - -// AdmissionPolicySpecApplyConfiguration represents an declarative configuration of the AdmissionPolicySpec type for use -// with apply. 
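Editor's note: both webhook apply configurations gain a MatchConditions field and the WithMatchConditions builder above. The sketch below wires a CEL match condition into a mutating webhook apply configuration; the webhook name and expression are illustrative, WithName comes from the pre-existing generated builders not shown in this hunk, and the surrounding Apply call against a clientset is assumed rather than shown.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

func main() {
	// Skip the webhook for Lease objects; the expression is an ordinary CEL
	// MatchCondition evaluated against the AdmissionRequest.
	cond := admissionregistrationv1.MatchCondition().
		WithName("exclude-leases").
		WithExpression(`request.resource.resource != "leases"`)

	webhook := admissionregistrationv1.MutatingWebhook().
		WithName("example.acme.io").
		WithMatchConditions(cond)

	fmt.Println(*webhook.Name, *webhook.MatchConditions[0].Expression)
}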
-type AdmissionPolicySpecApplyConfiguration struct { - ParamSource *ParamSourceApplyConfiguration `json:"paramSource,omitempty"` - MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` - Validations []ValidationApplyConfiguration `json:"validations,omitempty"` - FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` -} - -// AdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the AdmissionPolicySpec type for use with -// apply. -func AdmissionPolicySpec() *AdmissionPolicySpecApplyConfiguration { - return &AdmissionPolicySpecApplyConfiguration{} -} - -// WithParamSource sets the ParamSource field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ParamSource field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithParamSource(value *ParamSourceApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - b.ParamSource = value - return b -} - -// WithMatchResources sets the MatchResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MatchResources field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - b.MatchResources = value - return b -} - -// WithValidations adds the given value to the Validations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Validations field. -func (b *AdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithValidations") - } - b.Validations = append(b.Validations, *values[i]) - } - return b -} - -// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FailurePolicy field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1alpha1.FailurePolicyType) *AdmissionPolicySpecApplyConfiguration { - b.FailurePolicy = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go new file mode 100644 index 00000000..02369513 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// with apply. +type AuditAnnotationApplyConfiguration struct { + Key *string `json:"key,omitempty"` + ValueExpression *string `json:"valueExpression,omitempty"` +} + +// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// apply. +func AuditAnnotation() *AuditAnnotationApplyConfiguration { + return &AuditAnnotationApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithKey(value string) *AuditAnnotationApplyConfiguration { + b.Key = &value + return b +} + +// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ValueExpression field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithValueExpression(value string) *AuditAnnotationApplyConfiguration { + b.ValueExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go new file mode 100644 index 00000000..f8b511f5 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// with apply. +type ExpressionWarningApplyConfiguration struct { + FieldRef *string `json:"fieldRef,omitempty"` + Warning *string `json:"warning,omitempty"` +} + +// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// apply. 
+func ExpressionWarning() *ExpressionWarningApplyConfiguration { + return &ExpressionWarningApplyConfiguration{} +} + +// WithFieldRef sets the FieldRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FieldRef field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithFieldRef(value string) *ExpressionWarningApplyConfiguration { + b.FieldRef = &value + return b +} + +// WithWarning sets the Warning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Warning field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithWarning(value string) *ExpressionWarningApplyConfiguration { + b.Warning = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go similarity index 50% rename from vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go rename to vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go index a7a5a6af..186c750f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go @@ -18,31 +18,31 @@ limitations under the License. package v1alpha1 -// ParamSourceApplyConfiguration represents an declarative configuration of the ParamSource type for use +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use // with apply. -type ParamSourceApplyConfiguration struct { - APIVersion *string `json:"apiVersion,omitempty"` - Kind *string `json:"kind,omitempty"` +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` } -// ParamSourceApplyConfiguration constructs an declarative configuration of the ParamSource type for use with +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with // apply. -func ParamSource() *ParamSourceApplyConfiguration { - return &ParamSourceApplyConfiguration{} +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} } -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ParamSourceApplyConfiguration) WithAPIVersion(value string) *ParamSourceApplyConfiguration { - b.APIVersion = &value +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value return b } -// WithKind sets the Kind field in the declarative configuration to the given value +// WithExpression sets the Expression field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ParamSourceApplyConfiguration) WithKind(value string) *ParamSourceApplyConfiguration { - b.Kind = &value +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go deleted file mode 100644 index 313de9d5..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" -) - -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use -// with apply. -type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1alpha1.ScopeType `json:"scope,omitempty"` -} - -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with -// apply. -func Rule() *RuleApplyConfiguration { - return &RuleApplyConfiguration{} -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleApplyConfiguration) WithAPIGroups(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. 
-func (b *RuleApplyConfiguration) WithAPIVersions(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1alpha1.ScopeType) *RuleApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go deleted file mode 100644 index 112f4826..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/admissionregistration/v1" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" -) - -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use -// with apply. -type RuleWithOperationsApplyConfiguration struct { - Operations []v1.OperationType `json:"operations,omitempty"` - admissionregistrationv1.RuleApplyConfiguration `json:",inline"` -} - -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with -// apply. -func RuleWithOperations() *RuleWithOperationsApplyConfiguration { - return &RuleWithOperationsApplyConfiguration{} -} - -// WithOperations adds the given value to the Operations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Operations field. 
-func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Operations = append(b.Operations, values[i]) - } - return b -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go new file mode 100644 index 00000000..42a91707 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// with apply. 
+type TypeCheckingApplyConfiguration struct { + ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` +} + +// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// apply. +func TypeChecking() *TypeCheckingApplyConfiguration { + return &TypeCheckingApplyConfiguration{} +} + +// WithExpressionWarnings adds the given value to the ExpressionWarnings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExpressionWarnings field. +func (b *TypeCheckingApplyConfiguration) WithExpressionWarnings(values ...*ExpressionWarningApplyConfiguration) *TypeCheckingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExpressionWarnings") + } + b.ExpressionWarnings = append(b.ExpressionWarnings, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go index 3a23e0c7..c860b85c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -32,7 +32,8 @@ import ( type ValidatingAdmissionPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } // ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with @@ -245,3 +246,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithSpec(value *Validating b.Spec = value return b } + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *ValidatingAdmissionPolicyStatusApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go index f06f6554..c9a4ff7a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go @@ -18,12 +18,17 @@ limitations under the License. package v1alpha1 +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + // ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. 
type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { - PolicyName *string `json:"policyName,omitempty"` - ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` - MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + ValidationActions []admissionregistrationv1alpha1.ValidationAction `json:"validationActions,omitempty"` } // ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with @@ -55,3 +60,13 @@ func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResour b.MatchResources = value return b } + +// WithValidationActions adds the given value to the ValidationActions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ValidationActions field. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithValidationActions(values ...admissionregistrationv1alpha1.ValidationAction) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + for i := range values { + b.ValidationActions = append(b.ValidationActions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go index cba1e720..f674b5b1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go @@ -29,6 +29,8 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` Validations []ValidationApplyConfiguration `json:"validations,omitempty"` FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` + AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with @@ -73,3 +75,29 @@ func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(valu b.FailurePolicy = &value return b } + +// WithAuditAnnotations adds the given value to the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AuditAnnotations field. 
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithAuditAnnotations(values ...*AuditAnnotationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAuditAnnotations") + } + b.AuditAnnotations = append(b.AuditAnnotations, *values[i]) + } + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go new file mode 100644 index 00000000..821184c8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// with apply. +type ValidatingAdmissionPolicyStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// apply. +func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { + return &ValidatingAdmissionPolicyStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithObservedGeneration(value int64) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithTypeChecking sets the TypeChecking field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TypeChecking field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithTypeChecking(value *TypeCheckingApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.TypeChecking = value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go index 43916603..9a5fc847 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go @@ -25,9 +25,10 @@ import ( // ValidationApplyConfiguration represents an declarative configuration of the Validation type for use // with apply. type ValidationApplyConfiguration struct { - Expression *string `json:"expression,omitempty"` - Message *string `json:"message,omitempty"` - Reason *v1.StatusReason `json:"reason,omitempty"` + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *v1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` } // ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with @@ -59,3 +60,11 @@ func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *Valida b.Reason = &value return b } + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessageExpression(value string) *ValidationApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go new file mode 100644 index 00000000..d099b6b6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go index cc48d3b6..54845341 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go @@ -38,6 +38,7 @@ type MutatingWebhookApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` ReinvocationPolicy *admissionregistrationv1beta1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with @@ -140,3 +141,16 @@ func (b *MutatingWebhookApplyConfiguration) WithReinvocationPolicy(value admissi b.ReinvocationPolicy = &value return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
+func (b *MutatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go deleted file mode 100644 index 21151b99..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" -) - -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use -// with apply. -type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1beta1.ScopeType `json:"scope,omitempty"` -} - -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with -// apply. -func Rule() *RuleApplyConfiguration { - return &RuleApplyConfiguration{} -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleApplyConfiguration) WithAPIGroups(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleApplyConfiguration) WithAPIVersions(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. 
-func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1beta1.ScopeType) *RuleApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go deleted file mode 100644 index 0fd5dd34..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/admissionregistration/v1" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" -) - -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use -// with apply. -type RuleWithOperationsApplyConfiguration struct { - Operations []v1.OperationType `json:"operations,omitempty"` - admissionregistrationv1.RuleApplyConfiguration `json:",inline"` -} - -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with -// apply. -func RuleWithOperations() *RuleWithOperationsApplyConfiguration { - return &RuleWithOperationsApplyConfiguration{} -} - -// WithOperations adds the given value to the Operations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Operations field. -func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Operations = append(b.Operations, values[i]) - } - return b -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. 
-func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go index 84479b5d..8c5c341b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go @@ -37,6 +37,7 @@ type ValidatingWebhookApplyConfiguration struct { SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with @@ -131,3 +132,16 @@ func (b *ValidatingWebhookApplyConfiguration) WithAdmissionReviewVersions(values } return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
+func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go deleted file mode 100644 index 86601cc4..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v2 - -import ( - v1 "k8s.io/api/core/v1" -) - -// PodResourceMetricSourceApplyConfiguration represents an declarative configuration of the PodResourceMetricSource type for use -// with apply. -type PodResourceMetricSourceApplyConfiguration struct { - Name *v1.ResourceName `json:"name,omitempty"` - Target *MetricTargetApplyConfiguration `json:"target,omitempty"` -} - -// PodResourceMetricSourceApplyConfiguration constructs an declarative configuration of the PodResourceMetricSource type for use with -// apply. -func PodResourceMetricSource() *PodResourceMetricSourceApplyConfiguration { - return &PodResourceMetricSourceApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *PodResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *PodResourceMetricSourceApplyConfiguration { - b.Name = &value - return b -} - -// WithTarget sets the Target field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Target field is set to the value of the last call. -func (b *PodResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *PodResourceMetricSourceApplyConfiguration { - b.Target = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go similarity index 64% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go rename to vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go index 44890c2d..788d2a07 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1alpha1 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,66 +27,63 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSchedulingApplyConfiguration represents an declarative configuration of the PodScheduling type for use +// ClusterTrustBundleApplyConfiguration represents an declarative configuration of the ClusterTrustBundle type for use // with apply. -type PodSchedulingApplyConfiguration struct { +type ClusterTrustBundleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSchedulingSpecApplyConfiguration `json:"spec,omitempty"` - Status *PodSchedulingStatusApplyConfiguration `json:"status,omitempty"` + Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"` } -// PodScheduling constructs an declarative configuration of the PodScheduling type for use with +// ClusterTrustBundle constructs an declarative configuration of the ClusterTrustBundle type for use with // apply. -func PodScheduling(name, namespace string) *PodSchedulingApplyConfiguration { - b := &PodSchedulingApplyConfiguration{} +func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration { + b := &ClusterTrustBundleApplyConfiguration{} b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("PodScheduling") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithKind("ClusterTrustBundle") + b.WithAPIVersion("certificates.k8s.io/v1alpha1") return b } -// ExtractPodScheduling extracts the applied configuration owned by fieldManager from -// podScheduling. If no managedFields are found in podScheduling for fieldManager, a -// PodSchedulingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractClusterTrustBundle extracts the applied configuration owned by fieldManager from +// clusterTrustBundle. If no managedFields are found in clusterTrustBundle for fieldManager, a +// ClusterTrustBundleApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podScheduling must be a unmodified PodScheduling API object that was retrieved from the Kubernetes API. -// ExtractPodScheduling provides a way to perform a extract/modify-in-place/apply workflow. +// clusterTrustBundle must be a unmodified ClusterTrustBundle API object that was retrieved from the Kubernetes API. +// ExtractClusterTrustBundle provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractPodScheduling(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { - return extractPodScheduling(podScheduling, fieldManager, "") +func ExtractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) { + return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "") } -// ExtractPodSchedulingStatus is the same as ExtractPodScheduling except +// ExtractClusterTrustBundleStatus is the same as ExtractClusterTrustBundle except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSchedulingStatus(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { - return extractPodScheduling(podScheduling, fieldManager, "status") +func ExtractClusterTrustBundleStatus(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) { + return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "status") } -func extractPodScheduling(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string, subresource string) (*PodSchedulingApplyConfiguration, error) { - b := &PodSchedulingApplyConfiguration{} - err := managedfields.ExtractInto(podScheduling, internal.Parser().Type("io.k8s.api.resource.v1alpha1.PodScheduling"), fieldManager, b, subresource) +func extractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string, subresource string) (*ClusterTrustBundleApplyConfiguration, error) { + b := &ClusterTrustBundleApplyConfiguration{} + err := managedfields.ExtractInto(clusterTrustBundle, internal.Parser().Type("io.k8s.api.certificates.v1alpha1.ClusterTrustBundle"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podScheduling.Name) - b.WithNamespace(podScheduling.Namespace) + b.WithName(clusterTrustBundle.Name) - b.WithKind("PodScheduling") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithKind("ClusterTrustBundle") + b.WithAPIVersion("certificates.k8s.io/v1alpha1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithKind(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTrustBundleApplyConfiguration { b.Kind = &value return b } @@ -94,7 +91,7 @@ func (b *PodSchedulingApplyConfiguration) WithKind(value string) *PodSchedulingA // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
-func (b *PodSchedulingApplyConfiguration) WithAPIVersion(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *ClusterTrustBundleApplyConfiguration { b.APIVersion = &value return b } @@ -102,7 +99,7 @@ func (b *PodSchedulingApplyConfiguration) WithAPIVersion(value string) *PodSched // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithName(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -111,7 +108,7 @@ func (b *PodSchedulingApplyConfiguration) WithName(value string) *PodSchedulingA // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithGenerateName(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -120,7 +117,7 @@ func (b *PodSchedulingApplyConfiguration) WithGenerateName(value string) *PodSch // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithNamespace(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -129,7 +126,7 @@ func (b *PodSchedulingApplyConfiguration) WithNamespace(value string) *PodSchedu // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithUID(value types.UID) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -138,7 +135,7 @@ func (b *PodSchedulingApplyConfiguration) WithUID(value types.UID) *PodSchedulin // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. 
-func (b *PodSchedulingApplyConfiguration) WithResourceVersion(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -147,7 +144,7 @@ func (b *PodSchedulingApplyConfiguration) WithResourceVersion(value string) *Pod // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithGeneration(value int64) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -156,7 +153,7 @@ func (b *PodSchedulingApplyConfiguration) WithGeneration(value int64) *PodSchedu // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -165,7 +162,7 @@ func (b *PodSchedulingApplyConfiguration) WithCreationTimestamp(value metav1.Tim // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -174,7 +171,7 @@ func (b *PodSchedulingApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -184,7 +181,7 @@ func (b *PodSchedulingApplyConfiguration) WithDeletionGracePeriodSeconds(value i // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSchedulingApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -199,7 +196,7 @@ func (b *PodSchedulingApplyConfiguration) WithLabels(entries map[string]string) // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSchedulingApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -213,7 +210,7 @@ func (b *PodSchedulingApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSchedulingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -227,7 +224,7 @@ func (b *PodSchedulingApplyConfiguration) WithOwnerReferences(values ...*v1.Owne // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *PodSchedulingApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithFinalizers(values ...string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -235,7 +232,7 @@ func (b *PodSchedulingApplyConfiguration) WithFinalizers(values ...string) *PodS return b } -func (b *PodSchedulingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *ClusterTrustBundleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -244,15 +241,7 @@ func (b *PodSchedulingApplyConfiguration) ensureObjectMetaApplyConfigurationExis // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithSpec(value *PodSchedulingSpecApplyConfiguration) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundleSpecApplyConfiguration) *ClusterTrustBundleApplyConfiguration { b.Spec = value return b } - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithStatus(value *PodSchedulingStatusApplyConfiguration) *PodSchedulingApplyConfiguration { - b.Status = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go new file mode 100644 index 00000000..d1aea1d6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ClusterTrustBundleSpecApplyConfiguration represents an declarative configuration of the ClusterTrustBundleSpec type for use +// with apply. +type ClusterTrustBundleSpecApplyConfiguration struct { + SignerName *string `json:"signerName,omitempty"` + TrustBundle *string `json:"trustBundle,omitempty"` +} + +// ClusterTrustBundleSpecApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleSpec type for use with +// apply. 
+func ClusterTrustBundleSpec() *ClusterTrustBundleSpecApplyConfiguration { + return &ClusterTrustBundleSpecApplyConfiguration{} +} + +// WithSignerName sets the SignerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignerName field is set to the value of the last call. +func (b *ClusterTrustBundleSpecApplyConfiguration) WithSignerName(value string) *ClusterTrustBundleSpecApplyConfiguration { + b.SignerName = &value + return b +} + +// WithTrustBundle sets the TrustBundle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TrustBundle field is set to the value of the last call. +func (b *ClusterTrustBundleSpecApplyConfiguration) WithTrustBundle(value string) *ClusterTrustBundleSpecApplyConfiguration { + b.TrustBundle = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go index d3b066d9..9ada59ee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go @@ -25,28 +25,29 @@ import ( // ContainerApplyConfiguration represents an declarative configuration of the Container type for use // with apply. type ContainerApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Image *string `json:"image,omitempty"` - Command []string `json:"command,omitempty"` - Args []string `json:"args,omitempty"` - WorkingDir *string `json:"workingDir,omitempty"` - Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` - EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` - Env []EnvVarApplyConfiguration `json:"env,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` - VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` - Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` - TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` - TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` - ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` - Stdin *bool `json:"stdin,omitempty"` - StdinOnce *bool `json:"stdinOnce,omitempty"` - TTY *bool `json:"tty,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + WorkingDir *string `json:"workingDir,omitempty"` + Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` + EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` + Env []EnvVarApplyConfiguration `json:"env,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"` + VolumeMounts 
[]VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` + VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` + Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` + TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` + TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` + Stdin *bool `json:"stdin,omitempty"` + StdinOnce *bool `json:"stdinOnce,omitempty"` + TTY *bool `json:"tty,omitempty"` } // ContainerApplyConfiguration constructs an declarative configuration of the Container type for use with @@ -146,6 +147,19 @@ func (b *ContainerApplyConfiguration) WithResources(value *ResourceRequirementsA return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *ContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *ContainerApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go new file mode 100644 index 00000000..bbbcbc9f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ContainerResizePolicyApplyConfiguration represents an declarative configuration of the ContainerResizePolicy type for use +// with apply. +type ContainerResizePolicyApplyConfiguration struct { + ResourceName *v1.ResourceName `json:"resourceName,omitempty"` + RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` +} + +// ContainerResizePolicyApplyConfiguration constructs an declarative configuration of the ContainerResizePolicy type for use with +// apply. 
+func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration { + return &ContainerResizePolicyApplyConfiguration{} +} + +// WithResourceName sets the ResourceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceName field is set to the value of the last call. +func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.ResourceName) *ContainerResizePolicyApplyConfiguration { + b.ResourceName = &value + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value v1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration { + b.RestartPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go index 18d2925c..2b98c465 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go @@ -18,18 +18,24 @@ limitations under the License. package v1 +import ( + corev1 "k8s.io/api/core/v1" +) + // ContainerStatusApplyConfiguration represents an declarative configuration of the ContainerStatus type for use // with apply. type ContainerStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - State *ContainerStateApplyConfiguration `json:"state,omitempty"` - LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` - Ready *bool `json:"ready,omitempty"` - RestartCount *int32 `json:"restartCount,omitempty"` - Image *string `json:"image,omitempty"` - ImageID *string `json:"imageID,omitempty"` - ContainerID *string `json:"containerID,omitempty"` - Started *bool `json:"started,omitempty"` + Name *string `json:"name,omitempty"` + State *ContainerStateApplyConfiguration `json:"state,omitempty"` + LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` + Ready *bool `json:"ready,omitempty"` + RestartCount *int32 `json:"restartCount,omitempty"` + Image *string `json:"image,omitempty"` + ImageID *string `json:"imageID,omitempty"` + ContainerID *string `json:"containerID,omitempty"` + Started *bool `json:"started,omitempty"` + AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` } // ContainerStatusApplyConfiguration constructs an declarative configuration of the ContainerStatus type for use with @@ -109,3 +115,19 @@ func (b *ContainerStatusApplyConfiguration) WithStarted(value bool) *ContainerSt b.Started = &value return b } + +// WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocatedResources field is set to the value of the last call. 
+func (b *ContainerStatusApplyConfiguration) WithAllocatedResources(value corev1.ResourceList) *ContainerStatusApplyConfiguration { + b.AllocatedResources = &value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *ContainerStatusApplyConfiguration { + b.Resources = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go index 6c24cd41..c51049ba 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go @@ -126,6 +126,19 @@ func (b *EphemeralContainerApplyConfiguration) WithResources(value *ResourceRequ return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *EphemeralContainerApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go index 67e658cf..764b830e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go @@ -25,28 +25,29 @@ import ( // EphemeralContainerCommonApplyConfiguration represents an declarative configuration of the EphemeralContainerCommon type for use // with apply. 
type EphemeralContainerCommonApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Image *string `json:"image,omitempty"` - Command []string `json:"command,omitempty"` - Args []string `json:"args,omitempty"` - WorkingDir *string `json:"workingDir,omitempty"` - Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` - EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` - Env []EnvVarApplyConfiguration `json:"env,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` - VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` - Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` - TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` - TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` - ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` - Stdin *bool `json:"stdin,omitempty"` - StdinOnce *bool `json:"stdinOnce,omitempty"` - TTY *bool `json:"tty,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + WorkingDir *string `json:"workingDir,omitempty"` + Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` + EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` + Env []EnvVarApplyConfiguration `json:"env,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"` + VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` + VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` + Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` + TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` + TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` + Stdin *bool `json:"stdin,omitempty"` + StdinOnce *bool `json:"stdinOnce,omitempty"` + TTY *bool `json:"tty,omitempty"` } // EphemeralContainerCommonApplyConfiguration constructs an declarative configuration of the EphemeralContainerCommon type for use with @@ -146,6 +147,19 @@ func (b *EphemeralContainerCommonApplyConfiguration) WithResources(value *Resour return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. 
+func (b *EphemeralContainerCommonApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *EphemeralContainerCommonApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go index 7ee5b995..e9d8e5b2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go @@ -39,6 +39,7 @@ type PodStatusApplyConfiguration struct { ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"` QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"` EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"` + Resize *v1.PodResizeStatus `json:"resize,omitempty"` } // PodStatusApplyConfiguration constructs an declarative configuration of the PodStatus type for use with @@ -175,3 +176,11 @@ func (b *PodStatusApplyConfiguration) WithEphemeralContainerStatuses(values ...* } return b } + +// WithResize sets the Resize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resize field is set to the value of the last call. 
+func (b *PodStatusApplyConfiguration) WithResize(value v1.PodResizeStatus) *PodStatusApplyConfiguration { + b.Resize = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go index db376b94..493af6fb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go @@ -35,7 +35,7 @@ type ServiceSpecApplyConfiguration struct { LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` ExternalName *string `json:"externalName,omitempty"` - ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"` + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"` HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"` PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"` SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"` @@ -43,7 +43,7 @@ type ServiceSpecApplyConfiguration struct { IPFamilyPolicy *corev1.IPFamilyPolicy `json:"ipFamilyPolicy,omitempty"` AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"` LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` - InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty"` + InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"` } // ServiceSpecApplyConfiguration constructs an declarative configuration of the ServiceSpec type for use with @@ -152,7 +152,7 @@ func (b *ServiceSpecApplyConfiguration) WithExternalName(value string) *ServiceS // WithExternalTrafficPolicy sets the ExternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ExternalTrafficPolicy field is set to the value of the last call. -func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.ExternalTrafficPolicy = &value return b } @@ -218,7 +218,7 @@ func (b *ServiceSpecApplyConfiguration) WithLoadBalancerClass(value string) *Ser // WithInternalTrafficPolicy sets the InternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the InternalTrafficPolicy field is set to the value of the last call. 
-func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.InternalTrafficPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go deleted file mode 100644 index 27b49bf1..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedCSIDriverApplyConfiguration represents an declarative configuration of the AllowedCSIDriver type for use -// with apply. -type AllowedCSIDriverApplyConfiguration struct { - Name *string `json:"name,omitempty"` -} - -// AllowedCSIDriverApplyConfiguration constructs an declarative configuration of the AllowedCSIDriver type for use with -// apply. -func AllowedCSIDriver() *AllowedCSIDriverApplyConfiguration { - return &AllowedCSIDriverApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *AllowedCSIDriverApplyConfiguration) WithName(value string) *AllowedCSIDriverApplyConfiguration { - b.Name = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go deleted file mode 100644 index 30c3724c..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedFlexVolumeApplyConfiguration represents an declarative configuration of the AllowedFlexVolume type for use -// with apply. 
-type AllowedFlexVolumeApplyConfiguration struct { - Driver *string `json:"driver,omitempty"` -} - -// AllowedFlexVolumeApplyConfiguration constructs an declarative configuration of the AllowedFlexVolume type for use with -// apply. -func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { - return &AllowedFlexVolumeApplyConfiguration{} -} - -// WithDriver sets the Driver field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Driver field is set to the value of the last call. -func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration { - b.Driver = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go deleted file mode 100644 index 493815d8..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedHostPathApplyConfiguration represents an declarative configuration of the AllowedHostPath type for use -// with apply. -type AllowedHostPathApplyConfiguration struct { - PathPrefix *string `json:"pathPrefix,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` -} - -// AllowedHostPathApplyConfiguration constructs an declarative configuration of the AllowedHostPath type for use with -// apply. -func AllowedHostPath() *AllowedHostPathApplyConfiguration { - return &AllowedHostPathApplyConfiguration{} -} - -// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PathPrefix field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithPathPrefix(value string) *AllowedHostPathApplyConfiguration { - b.PathPrefix = &value - return b -} - -// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnly field is set to the value of the last call. 
-func (b *AllowedHostPathApplyConfiguration) WithReadOnly(value bool) *AllowedHostPathApplyConfiguration { - b.ReadOnly = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go deleted file mode 100644 index c7434a6a..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// FSGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the FSGroupStrategyOptions type for use -// with apply. -type FSGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.FSGroupStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// FSGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the FSGroupStrategyOptions type for use with -// apply. -func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration { - return &FSGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go deleted file mode 100644 index 7c796881..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// HostPortRangeApplyConfiguration represents an declarative configuration of the HostPortRange type for use -// with apply. -type HostPortRangeApplyConfiguration struct { - Min *int32 `json:"min,omitempty"` - Max *int32 `json:"max,omitempty"` -} - -// HostPortRangeApplyConfiguration constructs an declarative configuration of the HostPortRange type for use with -// apply. -func HostPortRange() *HostPortRangeApplyConfiguration { - return &HostPortRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMin(value int32) *HostPortRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMax(value int32) *HostPortRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go deleted file mode 100644 index af46f765..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// IDRangeApplyConfiguration represents an declarative configuration of the IDRange type for use -// with apply. -type IDRangeApplyConfiguration struct { - Min *int64 `json:"min,omitempty"` - Max *int64 `json:"max,omitempty"` -} - -// IDRangeApplyConfiguration constructs an declarative configuration of the IDRange type for use with -// apply. -func IDRange() *IDRangeApplyConfiguration { - return &IDRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. 
-func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go deleted file mode 100644 index de3949dc..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// PodSecurityPolicySpecApplyConfiguration represents an declarative configuration of the PodSecurityPolicySpec type for use -// with apply. -type PodSecurityPolicySpecApplyConfiguration struct { - Privileged *bool `json:"privileged,omitempty"` - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty"` - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty"` - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty"` - Volumes []v1beta1.FSType `json:"volumes,omitempty"` - HostNetwork *bool `json:"hostNetwork,omitempty"` - HostPorts []HostPortRangeApplyConfiguration `json:"hostPorts,omitempty"` - HostPID *bool `json:"hostPID,omitempty"` - HostIPC *bool `json:"hostIPC,omitempty"` - SELinux *SELinuxStrategyOptionsApplyConfiguration `json:"seLinux,omitempty"` - RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` - RunAsGroup *RunAsGroupStrategyOptionsApplyConfiguration `json:"runAsGroup,omitempty"` - SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` - FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` - AllowedHostPaths []AllowedHostPathApplyConfiguration `json:"allowedHostPaths,omitempty"` - AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` - AllowedCSIDrivers []AllowedCSIDriverApplyConfiguration `json:"allowedCSIDrivers,omitempty"` - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` - AllowedProcMountTypes []v1.ProcMountType 
`json:"allowedProcMountTypes,omitempty"` - RuntimeClass *RuntimeClassStrategyOptionsApplyConfiguration `json:"runtimeClass,omitempty"` -} - -// PodSecurityPolicySpecApplyConfiguration constructs an declarative configuration of the PodSecurityPolicySpec type for use with -// apply. -func PodSecurityPolicySpec() *PodSecurityPolicySpecApplyConfiguration { - return &PodSecurityPolicySpecApplyConfiguration{} -} - -// WithPrivileged sets the Privileged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Privileged field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithPrivileged(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.Privileged = &value - return b -} - -// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAddCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i]) - } - return b -} - -// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRequiredDropCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i]) - } - return b -} - -// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedCapabilities = append(b.AllowedCapabilities, values[i]) - } - return b -} - -// WithVolumes adds the given value to the Volumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Volumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithVolumes(values ...v1beta1.FSType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.Volumes = append(b.Volumes, values[i]) - } - return b -} - -// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostNetwork field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithHostNetwork(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostNetwork = &value - return b -} - -// WithHostPorts adds the given value to the HostPorts field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the HostPorts field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPorts(values ...*HostPortRangeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithHostPorts") - } - b.HostPorts = append(b.HostPorts, *values[i]) - } - return b -} - -// WithHostPID sets the HostPID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostPID field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPID(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostPID = &value - return b -} - -// WithHostIPC sets the HostIPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostIPC field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostIPC(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostIPC = &value - return b -} - -// WithSELinux sets the SELinux field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinux field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSELinux(value *SELinuxStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SELinux = value - return b -} - -// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsUser field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsUser = value - return b -} - -// WithRunAsGroup sets the RunAsGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsGroup(value *RunAsGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsGroup = value - return b -} - -// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SupplementalGroups field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SupplementalGroups = value - return b -} - -// WithFSGroup sets the FSGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FSGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.FSGroup = value - return b -} - -// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.ReadOnlyRootFilesystem = &value - return b -} - -// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.DefaultAllowPrivilegeEscalation = &value - return b -} - -// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.AllowPrivilegeEscalation = &value - return b -} - -// WithAllowedHostPaths adds the given value to the AllowedHostPaths field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedHostPaths field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedHostPaths(values ...*AllowedHostPathApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedHostPaths") - } - b.AllowedHostPaths = append(b.AllowedHostPaths, *values[i]) - } - return b -} - -// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedFlexVolumes") - } - b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i]) - } - return b -} - -// WithAllowedCSIDrivers adds the given value to the AllowedCSIDrivers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCSIDrivers field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCSIDrivers(values ...*AllowedCSIDriverApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedCSIDrivers") - } - b.AllowedCSIDrivers = append(b.AllowedCSIDrivers, *values[i]) - } - return b -} - -// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) - } - return b -} - -// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithForbiddenSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) - } - return b -} - -// WithAllowedProcMountTypes adds the given value to the AllowedProcMountTypes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedProcMountTypes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedProcMountTypes(values ...v1.ProcMountType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedProcMountTypes = append(b.AllowedProcMountTypes, values[i]) - } - return b -} - -// WithRuntimeClass sets the RuntimeClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RuntimeClass field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithRuntimeClass(value *RuntimeClassStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RuntimeClass = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go deleted file mode 100644 index 75e76e85..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsGroupStrategyOptions type for use -// with apply. -type RunAsGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsGroupStrategyOptions type for use with -// apply. -func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration { - return &RunAsGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go deleted file mode 100644 index 712c1675..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsUserStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsUserStrategyOptions type for use -// with apply. -type RunAsUserStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsUserStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsUserStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsUserStrategyOptions type for use with -// apply. -func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { - return &RunAsUserStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsUserStrategy) *RunAsUserStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsUserStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go deleted file mode 100644 index c19a7ce6..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// RuntimeClassStrategyOptionsApplyConfiguration represents an declarative configuration of the RuntimeClassStrategyOptions type for use -// with apply. 
-type RuntimeClassStrategyOptionsApplyConfiguration struct { - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames,omitempty"` - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty"` -} - -// RuntimeClassStrategyOptionsApplyConfiguration constructs an declarative configuration of the RuntimeClassStrategyOptions type for use with -// apply. -func RuntimeClassStrategyOptions() *RuntimeClassStrategyOptionsApplyConfiguration { - return &RuntimeClassStrategyOptionsApplyConfiguration{} -} - -// WithAllowedRuntimeClassNames adds the given value to the AllowedRuntimeClassNames field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedRuntimeClassNames field. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithAllowedRuntimeClassNames(values ...string) *RuntimeClassStrategyOptionsApplyConfiguration { - for i := range values { - b.AllowedRuntimeClassNames = append(b.AllowedRuntimeClassNames, values[i]) - } - return b -} - -// WithDefaultRuntimeClassName sets the DefaultRuntimeClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultRuntimeClassName field is set to the value of the last call. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithDefaultRuntimeClassName(value string) *RuntimeClassStrategyOptionsApplyConfiguration { - b.DefaultRuntimeClassName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go deleted file mode 100644 index 265906a7..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// SELinuxStrategyOptionsApplyConfiguration represents an declarative configuration of the SELinuxStrategyOptions type for use -// with apply. -type SELinuxStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SELinuxStrategy `json:"rule,omitempty"` - SELinuxOptions *v1.SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` -} - -// SELinuxStrategyOptionsApplyConfiguration constructs an declarative configuration of the SELinuxStrategyOptions type for use with -// apply. 
-func SELinuxStrategyOptions() *SELinuxStrategyOptionsApplyConfiguration { - return &SELinuxStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SELinuxStrategy) *SELinuxStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinuxOptions field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithSELinuxOptions(value *v1.SELinuxOptionsApplyConfiguration) *SELinuxStrategyOptionsApplyConfiguration { - b.SELinuxOptions = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go deleted file mode 100644 index ec431381..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// SupplementalGroupsStrategyOptionsApplyConfiguration represents an declarative configuration of the SupplementalGroupsStrategyOptions type for use -// with apply. -type SupplementalGroupsStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SupplementalGroupsStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// SupplementalGroupsStrategyOptionsApplyConfiguration constructs an declarative configuration of the SupplementalGroupsStrategyOptions type for use with -// apply. -func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { - return &SupplementalGroupsStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 94dd2160..361b2f4e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -39,6 +39,17 @@ func Parser() *typed.Parser { var parserOnce sync.Once var parser *typed.Parser var schemaYAML = typed.YAMLObject(`types: +- name: io.k8s.api.admissionregistration.v1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1.MutatingWebhook map: fields: @@ -55,6 +66,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -167,6 +186,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -225,6 +252,39 @@ var schemaYAML = typed.YAMLObject(`types: - name: url type: scalar: string +- name: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: valueExpression + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning + map: + fields: + - name: fieldRef + type: + scalar: string + default: "" + - name: warning + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1alpha1.MatchResources map: fields: @@ -307,6 +367,15 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.TypeChecking + map: + fields: + - name: expressionWarnings + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning + elementRelationship: atomic - name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy map: fields: @@ -324,6 +393,10 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec default: {} + - name: status + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus + default: {} - name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding map: fields: @@ -353,12 +426,32 @@ var schemaYAML = typed.YAMLObject(`types: - name: policyName type: scalar: string + - name: validationActions + type: + list: + elementType: + 
scalar: string + elementRelationship: associative - name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec map: fields: + - name: auditAnnotations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation + elementRelationship: atomic - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchConstraints type: namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources @@ -371,6 +464,23 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.admissionregistration.v1alpha1.Validation elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: observedGeneration + type: + scalar: numeric + - name: typeChecking + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.TypeChecking - name: io.k8s.api.admissionregistration.v1alpha1.Validation map: fields: @@ -381,9 +491,23 @@ var schemaYAML = typed.YAMLObject(`types: - name: message type: scalar: string + - name: messageExpression + type: + scalar: string - name: reason type: scalar: string +- name: io.k8s.api.admissionregistration.v1beta1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook map: fields: @@ -400,6 +524,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -482,6 +614,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -3502,6 +3642,33 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type +- name: io.k8s.api.certificates.v1alpha1.ClusterTrustBundle + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec + default: {} +- name: io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec + map: + fields: + - name: signerName + type: + scalar: string + - name: trustBundle + type: + scalar: string + default: "" - name: io.k8s.api.certificates.v1beta1.CertificateSigningRequest map: fields: @@ -4129,6 +4296,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements @@ -4205,6 +4378,17 @@ var schemaYAML = typed.YAMLObject(`types: 
type: scalar: string default: TCP +- name: io.k8s.api.core.v1.ContainerResizePolicy + map: + fields: + - name: resourceName + type: + scalar: string + default: "" + - name: restartPolicy + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ContainerState map: fields: @@ -4263,6 +4447,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.ContainerStatus map: fields: + - name: allocatedResources + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: containerID type: scalar: string @@ -4286,6 +4475,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: boolean default: false + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements - name: restartCount type: scalar: numeric @@ -4521,6 +4713,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements @@ -6185,6 +6383,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: reason type: scalar: string + - name: resize + type: + scalar: string - name: startTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time @@ -7734,29 +7935,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime default: {} -- name: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - map: - fields: - - name: name - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - map: - fields: - - name: driver - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedHostPath - map: - fields: - - name: pathPrefix - type: - scalar: string - - name: readOnly - type: - scalar: boolean - name: io.k8s.api.extensions.v1beta1.DaemonSet map: fields: @@ -7992,18 +8170,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.extensions.v1beta1.HTTPIngressPath map: fields: @@ -8026,28 +8192,6 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.extensions.v1beta1.HTTPIngressPath elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.HostPortRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 -- name: io.k8s.api.extensions.v1beta1.IDRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 - name: io.k8s.api.extensions.v1beta1.IPBlock map: fields: @@ -8293,135 +8437,6 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicy - map: - fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec - default: {} -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec - map: - fields: - - name: allowPrivilegeEscalation - type: - 
scalar: boolean - - name: allowedCSIDrivers - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - elementRelationship: atomic - - name: allowedCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedFlexVolumes - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - elementRelationship: atomic - - name: allowedHostPaths - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedHostPath - elementRelationship: atomic - - name: allowedProcMountTypes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedUnsafeSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAddCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAllowPrivilegeEscalation - type: - scalar: boolean - - name: forbiddenSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: fsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - default: {} - - name: hostIPC - type: - scalar: boolean - - name: hostNetwork - type: - scalar: boolean - - name: hostPID - type: - scalar: boolean - - name: hostPorts - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.HostPortRange - elementRelationship: atomic - - name: privileged - type: - scalar: boolean - - name: readOnlyRootFilesystem - type: - scalar: boolean - - name: requiredDropCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: runAsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - - name: runAsUser - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - default: {} - - name: runtimeClass - type: - namedType: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - - name: seLinux - type: - namedType: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - default: {} - - name: supplementalGroups - type: - namedType: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - default: {} - - name: volumes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - name: io.k8s.api.extensions.v1beta1.ReplicaSet map: fields: @@ -8531,66 +8546,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxUnavailable type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - map: - fields: - - name: allowedRuntimeClassNames - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultRuntimeClassName - type: - scalar: string -- name: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - map: - fields: - - name: rule - type: - scalar: string - default: "" - - name: seLinuxOptions - type: - namedType: io.k8s.api.core.v1.SELinuxOptions -- name: 
io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod map: fields: @@ -10270,6 +10225,47 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 +- name: io.k8s.api.networking.v1alpha1.IPAddress + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec + default: {} +- name: io.k8s.api.networking.v1alpha1.IPAddressSpec + map: + fields: + - name: parentRef + type: + namedType: io.k8s.api.networking.v1alpha1.ParentReference +- name: io.k8s.api.networking.v1alpha1.ParentReference + map: + fields: + - name: group + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string + - name: uid + type: + scalar: string - name: io.k8s.api.networking.v1beta1.HTTPIngressPath map: fields: @@ -11509,19 +11505,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string -- name: io.k8s.api.resource.v1alpha1.AllocationResult +- name: io.k8s.api.resource.v1alpha2.AllocationResult map: fields: - name: availableOnNodes type: namedType: io.k8s.api.core.v1.NodeSelector - - name: resourceHandle + - name: resourceHandles type: - scalar: string + list: + elementType: + namedType: io.k8s.api.resource.v1alpha2.ResourceHandle + elementRelationship: atomic - name: shareable type: scalar: boolean -- name: io.k8s.api.resource.v1alpha1.PodScheduling +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContext map: fields: - name: apiVersion @@ -11536,13 +11535,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.PodSchedulingSpec + namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha1.PodSchedulingStatus + namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus default: {} -- name: io.k8s.api.resource.v1alpha1.PodSchedulingSpec +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec map: fields: - name: potentialNodes @@ -11554,18 +11553,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: selectedNode type: scalar: string -- name: io.k8s.api.resource.v1alpha1.PodSchedulingStatus +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus map: fields: - name: resourceClaims type: list: elementType: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus elementRelationship: associative keys: - name -- name: io.k8s.api.resource.v1alpha1.ResourceClaim +- name: io.k8s.api.resource.v1alpha2.ResourceClaim map: fields: - name: apiVersion @@ -11580,13 +11579,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimStatus + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimStatus default: {} -- name: 
io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference +- name: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference map: fields: - name: apiGroup @@ -11604,7 +11603,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference +- name: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference map: fields: - name: apiGroup @@ -11618,7 +11617,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus +- name: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus map: fields: - name: name @@ -11630,7 +11629,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.resource.v1alpha1.ResourceClaimSpec +- name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec map: fields: - name: allocationMode @@ -11638,17 +11637,17 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string - name: parametersRef type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - name: resourceClassName type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimStatus +- name: io.k8s.api.resource.v1alpha2.ResourceClaimStatus map: fields: - name: allocation type: - namedType: io.k8s.api.resource.v1alpha1.AllocationResult + namedType: io.k8s.api.resource.v1alpha2.AllocationResult - name: deallocationRequested type: scalar: boolean @@ -11659,11 +11658,11 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference elementRelationship: associative keys: - uid -- name: io.k8s.api.resource.v1alpha1.ResourceClaimTemplate +- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplate map: fields: - name: apiVersion @@ -11678,9 +11677,9 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec +- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec map: fields: - name: metadata @@ -11689,9 +11688,9 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClass +- name: io.k8s.api.resource.v1alpha2.ResourceClass map: fields: - name: apiVersion @@ -11710,11 +11709,11 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: parametersRef type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClassParametersReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference - name: suitableNodes type: namedType: io.k8s.api.core.v1.NodeSelector -- name: io.k8s.api.resource.v1alpha1.ResourceClassParametersReference +- name: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference map: fields: - name: apiGroup @@ -11731,6 +11730,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string +- name: io.k8s.api.resource.v1alpha2.ResourceHandle + map: + fields: + - name: data + type: + scalar: string + - name: driverName + 
type: + scalar: string - name: io.k8s.api.scheduling.v1.PriorityClass map: fields: diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go deleted file mode 100644 index f400e516..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// GroupVersionKindApplyConfiguration represents an declarative configuration of the GroupVersionKind type for use -// with apply. -type GroupVersionKindApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Version *string `json:"version,omitempty"` - Kind *string `json:"kind,omitempty"` -} - -// GroupVersionKindApplyConfiguration constructs an declarative configuration of the GroupVersionKind type for use with -// apply. -func GroupVersionKind() *GroupVersionKindApplyConfiguration { - return &GroupVersionKindApplyConfiguration{} -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *GroupVersionKindApplyConfiguration) WithGroup(value string) *GroupVersionKindApplyConfiguration { - b.Group = &value - return b -} - -// WithVersion sets the Version field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Version field is set to the value of the last call. -func (b *GroupVersionKindApplyConfiguration) WithVersion(value string) *GroupVersionKindApplyConfiguration { - b.Version = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *GroupVersionKindApplyConfiguration) WithKind(value string) *GroupVersionKindApplyConfiguration { - b.Kind = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go deleted file mode 100644 index 5cadee33..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// ListMetaApplyConfiguration represents an declarative configuration of the ListMeta type for use -// with apply. -type ListMetaApplyConfiguration struct { - SelfLink *string `json:"selfLink,omitempty"` - ResourceVersion *string `json:"resourceVersion,omitempty"` - Continue *string `json:"continue,omitempty"` - RemainingItemCount *int64 `json:"remainingItemCount,omitempty"` -} - -// ListMetaApplyConfiguration constructs an declarative configuration of the ListMeta type for use with -// apply. -func ListMeta() *ListMetaApplyConfiguration { - return &ListMetaApplyConfiguration{} -} - -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithSelfLink(value string) *ListMetaApplyConfiguration { - b.SelfLink = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithResourceVersion(value string) *ListMetaApplyConfiguration { - b.ResourceVersion = &value - return b -} - -// WithContinue sets the Continue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Continue field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithContinue(value string) *ListMetaApplyConfiguration { - b.Continue = &value - return b -} - -// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RemainingItemCount field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithRemainingItemCount(value int64) *ListMetaApplyConfiguration { - b.RemainingItemCount = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go deleted file mode 100644 index 7db43208..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// StatusApplyConfiguration represents an declarative configuration of the Status type for use -// with apply. -type StatusApplyConfiguration struct { - TypeMetaApplyConfiguration `json:",inline"` - *ListMetaApplyConfiguration `json:"metadata,omitempty"` - Status *string `json:"status,omitempty"` - Message *string `json:"message,omitempty"` - Reason *metav1.StatusReason `json:"reason,omitempty"` - Details *StatusDetailsApplyConfiguration `json:"details,omitempty"` - Code *int32 `json:"code,omitempty"` -} - -// StatusApplyConfiguration constructs an declarative configuration of the Status type for use with -// apply. -func Status() *StatusApplyConfiguration { - b := &StatusApplyConfiguration{} - b.WithKind("Status") - b.WithAPIVersion("meta.k8s.io/v1") - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithKind(value string) *StatusApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithAPIVersion(value string) *StatusApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithSelfLink(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithResourceVersion(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithContinue sets the Continue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Continue field is set to the value of the last call. 
-func (b *StatusApplyConfiguration) WithContinue(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.Continue = &value - return b -} - -// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RemainingItemCount field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithRemainingItemCount(value int64) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.RemainingItemCount = &value - return b -} - -func (b *StatusApplyConfiguration) ensureListMetaApplyConfigurationExists() { - if b.ListMetaApplyConfiguration == nil { - b.ListMetaApplyConfiguration = &ListMetaApplyConfiguration{} - } -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithStatus(value string) *StatusApplyConfiguration { - b.Status = &value - return b -} - -// WithMessage sets the Message field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Message field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithMessage(value string) *StatusApplyConfiguration { - b.Message = &value - return b -} - -// WithReason sets the Reason field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Reason field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithReason(value metav1.StatusReason) *StatusApplyConfiguration { - b.Reason = &value - return b -} - -// WithDetails sets the Details field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Details field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithDetails(value *StatusDetailsApplyConfiguration) *StatusApplyConfiguration { - b.Details = value - return b -} - -// WithCode sets the Code field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Code field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithCode(value int32) *StatusApplyConfiguration { - b.Code = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go deleted file mode 100644 index 7f05bca4..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// StatusCauseApplyConfiguration represents an declarative configuration of the StatusCause type for use -// with apply. -type StatusCauseApplyConfiguration struct { - Type *v1.CauseType `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - Field *string `json:"field,omitempty"` -} - -// StatusCauseApplyConfiguration constructs an declarative configuration of the StatusCause type for use with -// apply. -func StatusCause() *StatusCauseApplyConfiguration { - return &StatusCauseApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithType(value v1.CauseType) *StatusCauseApplyConfiguration { - b.Type = &value - return b -} - -// WithMessage sets the Message field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Message field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithMessage(value string) *StatusCauseApplyConfiguration { - b.Message = &value - return b -} - -// WithField sets the Field field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Field field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithField(value string) *StatusCauseApplyConfiguration { - b.Field = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go deleted file mode 100644 index a7dbaa1b..00000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - types "k8s.io/apimachinery/pkg/types" -) - -// StatusDetailsApplyConfiguration represents an declarative configuration of the StatusDetails type for use -// with apply. 
-type StatusDetailsApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Group *string `json:"group,omitempty"` - Kind *string `json:"kind,omitempty"` - UID *types.UID `json:"uid,omitempty"` - Causes []StatusCauseApplyConfiguration `json:"causes,omitempty"` - RetryAfterSeconds *int32 `json:"retryAfterSeconds,omitempty"` -} - -// StatusDetailsApplyConfiguration constructs an declarative configuration of the StatusDetails type for use with -// apply. -func StatusDetails() *StatusDetailsApplyConfiguration { - return &StatusDetailsApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithName(value string) *StatusDetailsApplyConfiguration { - b.Name = &value - return b -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithGroup(value string) *StatusDetailsApplyConfiguration { - b.Group = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithKind(value string) *StatusDetailsApplyConfiguration { - b.Kind = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithUID(value types.UID) *StatusDetailsApplyConfiguration { - b.UID = &value - return b -} - -// WithCauses adds the given value to the Causes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Causes field. -func (b *StatusDetailsApplyConfiguration) WithCauses(values ...*StatusCauseApplyConfiguration) *StatusDetailsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithCauses") - } - b.Causes = append(b.Causes, *values[i]) - } - return b -} - -// WithRetryAfterSeconds sets the RetryAfterSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RetryAfterSeconds field is set to the value of the last call. 
-func (b *StatusDetailsApplyConfiguration) WithRetryAfterSeconds(value int32) *StatusDetailsApplyConfiguration { - b.RetryAfterSeconds = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go similarity index 66% rename from vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go index c70906cf..da682211 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1alpha1 import ( - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,63 +27,63 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSecurityPolicyApplyConfiguration represents an declarative configuration of the PodSecurityPolicy type for use +// IPAddressApplyConfiguration represents an declarative configuration of the IPAddress type for use // with apply. -type PodSecurityPolicyApplyConfiguration struct { +type IPAddressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSecurityPolicySpecApplyConfiguration `json:"spec,omitempty"` + Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` } -// PodSecurityPolicy constructs an declarative configuration of the PodSecurityPolicy type for use with +// IPAddress constructs an declarative configuration of the IPAddress type for use with // apply. -func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { - b := &PodSecurityPolicyApplyConfiguration{} +func IPAddress(name string) *IPAddressApplyConfiguration { + b := &IPAddressApplyConfiguration{} b.WithName(name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1alpha1") return b } -// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from -// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a -// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractIPAddress extracts the applied configuration owned by fieldManager from +// iPAddress. If no managedFields are found in iPAddress for fieldManager, a +// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. -// ExtractPodSecurityPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// iPAddress must be a unmodified IPAddress API object that was retrieved from the Kubernetes API. 
+// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") +func ExtractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "") } -// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except +// ExtractIPAddressStatus is the same as ExtractIPAddress except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSecurityPolicyStatus(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") +func ExtractIPAddressStatus(iPAddress *networkingv1alpha1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "status") } -func extractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { - b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) +func extractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) { + b := &IPAddressApplyConfiguration{} + err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1alpha1.IPAddress"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podSecurityPolicy.Name) + b.WithName(iPAddress.Name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1alpha1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { b.Kind = &value return b } @@ -91,7 +91,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurit // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { b.APIVersion = &value return b } @@ -99,7 +99,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodS // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -108,7 +108,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurit // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -117,7 +117,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *Po // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -126,7 +126,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSe // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -135,7 +135,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecur // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -144,7 +144,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -153,7 +153,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSe // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -162,7 +162,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1 // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -171,7 +171,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1 // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -181,7 +181,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(val // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -196,7 +196,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]stri // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -210,7 +210,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -224,7 +224,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1. // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -232,7 +232,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) * return b } -func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *IPAddressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -241,7 +241,7 @@ func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfiguration // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithSpec(value *PodSecurityPolicySpecApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfiguration) *IPAddressApplyConfiguration { b.Spec = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go new file mode 100644 index 00000000..064963d6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// IPAddressSpecApplyConfiguration represents an declarative configuration of the IPAddressSpec type for use +// with apply. +type IPAddressSpecApplyConfiguration struct { + ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` +} + +// IPAddressSpecApplyConfiguration constructs an declarative configuration of the IPAddressSpec type for use with +// apply. +func IPAddressSpec() *IPAddressSpecApplyConfiguration { + return &IPAddressSpecApplyConfiguration{} +} + +// WithParentRef sets the ParentRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParentRef field is set to the value of the last call. +func (b *IPAddressSpecApplyConfiguration) WithParentRef(value *ParentReferenceApplyConfiguration) *IPAddressSpecApplyConfiguration { + b.ParentRef = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go new file mode 100644 index 00000000..14b10b19 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -0,0 +1,79 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + types "k8s.io/apimachinery/pkg/types" +) + +// ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use +// with apply. 
+type ParentReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + UID *types.UID `json:"uid,omitempty"` +} + +// ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with +// apply. +func ParentReference() *ParentReferenceApplyConfiguration { + return &ParentReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithGroup(value string) *ParentReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithResource(value string) *ParentReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithNamespace(value string) *ParentReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithUID(value types.UID) *ParentReferenceApplyConfiguration { + b.UID = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go similarity index 76% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go index a2ad3adf..bc6078aa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" @@ -25,7 +25,7 @@ import ( // AllocationResultApplyConfiguration represents an declarative configuration of the AllocationResult type for use // with apply. 
type AllocationResultApplyConfiguration struct { - ResourceHandle *string `json:"resourceHandle,omitempty"` + ResourceHandles []ResourceHandleApplyConfiguration `json:"resourceHandles,omitempty"` AvailableOnNodes *v1.NodeSelectorApplyConfiguration `json:"availableOnNodes,omitempty"` Shareable *bool `json:"shareable,omitempty"` } @@ -36,11 +36,16 @@ func AllocationResult() *AllocationResultApplyConfiguration { return &AllocationResultApplyConfiguration{} } -// WithResourceHandle sets the ResourceHandle field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceHandle field is set to the value of the last call. -func (b *AllocationResultApplyConfiguration) WithResourceHandle(value string) *AllocationResultApplyConfiguration { - b.ResourceHandle = &value +// WithResourceHandles adds the given value to the ResourceHandles field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceHandles field. +func (b *AllocationResultApplyConfiguration) WithResourceHandles(values ...*ResourceHandleApplyConfiguration) *AllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceHandles") + } + b.ResourceHandles = append(b.ResourceHandles, *values[i]) + } return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go new file mode 100644 index 00000000..1dfb6ff9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go @@ -0,0 +1,258 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PodSchedulingContextApplyConfiguration represents an declarative configuration of the PodSchedulingContext type for use +// with apply. +type PodSchedulingContextApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodSchedulingContextSpecApplyConfiguration `json:"spec,omitempty"` + Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"` +} + +// PodSchedulingContext constructs an declarative configuration of the PodSchedulingContext type for use with +// apply. 
+func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration { + b := &PodSchedulingContextApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("PodSchedulingContext") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b +} + +// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from +// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a +// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// podSchedulingContext must be a unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API. +// ExtractPodSchedulingContext provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { + return extractPodSchedulingContext(podSchedulingContext, fieldManager, "") +} + +// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { + return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status") +} + +func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { + b := &PodSchedulingContextApplyConfiguration{} + err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodSchedulingContext"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(podSchedulingContext.Name) + b.WithNamespace(podSchedulingContext.Namespace) + + b.WithKind("PodSchedulingContext") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
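The Extract helpers above back the extract/modify-in-place/apply workflow their doc comments describe. A hedged sketch of that flow; the field-manager name and selected node are illustrative, and the ResourceV1alpha2().PodSchedulingContexts() accessor is assumed from the typed client updated elsewhere in this bump:

package example

import (
	"context"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1alpha2ac "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
	"k8s.io/client-go/kubernetes"
)

func reapplySelectedNode(ctx context.Context, cs kubernetes.Interface, psc *resourcev1alpha2.PodSchedulingContext) error {
	const fieldManager = "example-scheduler" // illustrative field manager

	// Extract only the fields this manager already owns.
	ac, err := resourcev1alpha2ac.ExtractPodSchedulingContext(psc, fieldManager)
	if err != nil {
		return err
	}
	// Modify in place, then re-apply via server-side apply. A real controller
	// would merge into the extracted spec rather than replace it wholesale.
	ac.WithSpec(resourcev1alpha2ac.PodSchedulingContextSpec().WithSelectedNode("node-1"))
	_, err = cs.ResourceV1alpha2().PodSchedulingContexts(psc.Namespace).
		Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager})
	return err
}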
+func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go similarity index 67% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go index 9fd3c1ee..c95d3295 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go @@ -16,25 +16,25 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 -// PodSchedulingSpecApplyConfiguration represents an declarative configuration of the PodSchedulingSpec type for use +// PodSchedulingContextSpecApplyConfiguration represents an declarative configuration of the PodSchedulingContextSpec type for use // with apply. 
-type PodSchedulingSpecApplyConfiguration struct { +type PodSchedulingContextSpecApplyConfiguration struct { SelectedNode *string `json:"selectedNode,omitempty"` PotentialNodes []string `json:"potentialNodes,omitempty"` } -// PodSchedulingSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingSpec type for use with +// PodSchedulingContextSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingContextSpec type for use with // apply. -func PodSchedulingSpec() *PodSchedulingSpecApplyConfiguration { - return &PodSchedulingSpecApplyConfiguration{} +func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration { + return &PodSchedulingContextSpecApplyConfiguration{} } // WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SelectedNode field is set to the value of the last call. -func (b *PodSchedulingSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingSpecApplyConfiguration { +func (b *PodSchedulingContextSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingContextSpecApplyConfiguration { b.SelectedNode = &value return b } @@ -42,7 +42,7 @@ func (b *PodSchedulingSpecApplyConfiguration) WithSelectedNode(value string) *Po // WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the PotentialNodes field. -func (b *PodSchedulingSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingSpecApplyConfiguration { +func (b *PodSchedulingContextSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingContextSpecApplyConfiguration { for i := range values { b.PotentialNodes = append(b.PotentialNodes, values[i]) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go similarity index 64% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go index 5744f6c3..a8b10b9a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 -// PodSchedulingStatusApplyConfiguration represents an declarative configuration of the PodSchedulingStatus type for use +// PodSchedulingContextStatusApplyConfiguration represents an declarative configuration of the PodSchedulingContextStatus type for use // with apply. 
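Apart from the renames above, the spec builder is used exactly as before; a minimal sketch with illustrative node names:

package example

import (
	resourcev1alpha2ac "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

func examplePodSchedulingContextSpec() *resourcev1alpha2ac.PodSchedulingContextSpecApplyConfiguration {
	// WithPotentialNodes appends on each call; WithSelectedNode overwrites.
	return resourcev1alpha2ac.PodSchedulingContextSpec().
		WithSelectedNode("node-a").
		WithPotentialNodes("node-a", "node-b")
}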
-type PodSchedulingStatusApplyConfiguration struct { +type PodSchedulingContextStatusApplyConfiguration struct { ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` } -// PodSchedulingStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingStatus type for use with +// PodSchedulingContextStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingContextStatus type for use with // apply. -func PodSchedulingStatus() *PodSchedulingStatusApplyConfiguration { - return &PodSchedulingStatusApplyConfiguration{} +func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration { + return &PodSchedulingContextStatusApplyConfiguration{} } // WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the ResourceClaims field. -func (b *PodSchedulingStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingStatusApplyConfiguration { +func (b *PodSchedulingContextStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingContextStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithResourceClaims") diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go similarity index 96% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go index f94811a9..6c219f83 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -43,7 +43,7 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -58,20 +58,20 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
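The renamed status builder composes with the otherwise unchanged ResourceClaimSchedulingStatus builder; in the sketch below, WithName is assumed from that generated builder and the claim name is illustrative:

package example

import (
	resourcev1alpha2ac "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

func examplePodSchedulingContextStatus() *resourcev1alpha2ac.PodSchedulingContextStatusApplyConfiguration {
	// WithResourceClaims appends one entry per claim and panics on nil values.
	return resourcev1alpha2ac.PodSchedulingContextStatus().
		WithResourceClaims(
			resourcev1alpha2ac.ResourceClaimSchedulingStatus().
				WithName("my-claim"), // WithName assumed from the unchanged generated builder
		)
}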
-func ExtractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "") } // ExtractResourceClaimStatus is the same as ExtractResourceClaim except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "status") } -func extractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { +func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { b := &ResourceClaimApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClaim"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaim"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func extractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldMa b.WithNamespace(resourceClaim.Namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go index 477099cd..41bb9e9a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( types "k8s.io/apimachinery/pkg/types" diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go index d7b25d75..27820ede 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 // ResourceClaimParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimParametersReference type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go index 35ff34ab..e74679ae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimSchedulingStatusApplyConfiguration represents an declarative configuration of the ResourceClaimSchedulingStatus type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go index d3261904..0c73e64e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" ) // ResourceClaimSpecApplyConfiguration represents an declarative configuration of the ResourceClaimSpec type for use @@ -27,7 +27,7 @@ import ( type ResourceClaimSpecApplyConfiguration struct { ResourceClassName *string `json:"resourceClassName,omitempty"` ParametersRef *ResourceClaimParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` - AllocationMode *resourcev1alpha1.AllocationMode `json:"allocationMode,omitempty"` + AllocationMode *resourcev1alpha2.AllocationMode `json:"allocationMode,omitempty"` } // ResourceClaimSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimSpec type for use with @@ -55,7 +55,7 @@ func (b *ResourceClaimSpecApplyConfiguration) WithParametersRef(value *ResourceC // WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AllocationMode field is set to the value of the last call. 
-func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha1.AllocationMode) *ResourceClaimSpecApplyConfiguration { +func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha2.AllocationMode) *ResourceClaimSpecApplyConfiguration { b.AllocationMode = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go index e2283f8b..c6fa6109 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimStatusApplyConfiguration represents an declarative configuration of the ResourceClaimStatus type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go similarity index 96% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go index e3c602cb..fc2209b8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -57,20 +57,20 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") } // ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except // that it extracts the status subresource applied configuration. // Experimental! 
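Call sites that set an allocation mode now pull the constant from the v1alpha2 API package. A minimal sketch, where the claim and class names are illustrative and WithSpec/WithResourceClassName are assumed from generated builders not shown in this hunk:

package example

import (
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	resourcev1alpha2ac "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

func exampleResourceClaim() *resourcev1alpha2ac.ResourceClaimApplyConfiguration {
	return resourcev1alpha2ac.ResourceClaim("my-claim", "default").
		WithSpec(resourcev1alpha2ac.ResourceClaimSpec().
			WithResourceClassName("example-class"). // illustrative class name
			// The mode constant now comes from k8s.io/api/resource/v1alpha2.
			WithAllocationMode(resourcev1alpha2.AllocationModeWaitForFirstConsumer))
}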
-func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") } -func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { b := &ResourceClaimTemplateApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClaimTemplate"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimTemplate"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -78,7 +78,7 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.Resour b.WithNamespace(resourceClaimTemplate.Namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go index 88058e06..2f38ea03 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go similarity index 96% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go index 5f980acd..724c9e88 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -44,7 +44,7 @@ func ResourceClass(name string) *ResourceClassApplyConfiguration { b := &ResourceClassApplyConfiguration{} b.WithName(name) b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -59,27 +59,27 @@ func ResourceClass(name string) *ResourceClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClass(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { +func ExtractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { return extractResourceClass(resourceClass, fieldManager, "") } // ExtractResourceClassStatus is the same as ExtractResourceClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceClassStatus(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { +func ExtractResourceClassStatus(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { return extractResourceClass(resourceClass, fieldManager, "status") } -func extractResourceClass(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { +func extractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { b := &ResourceClassApplyConfiguration{} - err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClass"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClass"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(resourceClass.Name) b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go index b03a9a6d..d67e4d39 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 // ResourceClassParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClassParametersReference type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go new file mode 100644 index 00000000..028cbaa1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// ResourceHandleApplyConfiguration represents an declarative configuration of the ResourceHandle type for use +// with apply. +type ResourceHandleApplyConfiguration struct { + DriverName *string `json:"driverName,omitempty"` + Data *string `json:"data,omitempty"` +} + +// ResourceHandleApplyConfiguration constructs an declarative configuration of the ResourceHandle type for use with +// apply. +func ResourceHandle() *ResourceHandleApplyConfiguration { + return &ResourceHandleApplyConfiguration{} +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *ResourceHandleApplyConfiguration) WithDriverName(value string) *ResourceHandleApplyConfiguration { + b.DriverName = &value + return b +} + +// WithData sets the Data field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Data field is set to the value of the last call. +func (b *ResourceHandleApplyConfiguration) WithData(value string) *ResourceHandleApplyConfiguration { + b.Data = &value + return b +} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 1253fa1f..5490b482 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -67,6 +67,9 @@ const ( acceptDiscoveryFormats = AcceptV2Beta1 + "," + AcceptV1 ) +// Aggregated discovery content-type GVK. +var v2Beta1GVK = schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2beta1", Kind: "APIGroupDiscoveryList"} + // DiscoveryInterface holds the methods that discover server-supported API groups, // versions and resources. type DiscoveryInterface interface { @@ -260,16 +263,15 @@ func (d *DiscoveryClient) downloadLegacy() ( } var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList - // Switch on content-type server responded with: aggregated or unaggregated. 
- switch { - case isV2Beta1ContentType(responseContentType): + // Based on the content-type server responded with: aggregated or unaggregated. + if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { return nil, nil, nil, err } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) - default: + } else { // Default is unaggregated discovery v1. var v metav1.APIVersions err = json.Unmarshal(body, &v) @@ -313,16 +315,15 @@ func (d *DiscoveryClient) downloadAPIs() ( apiGroupList := &metav1.APIGroupList{} failedGVs := map[schema.GroupVersion]error{} var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList - // Switch on content-type server responded with: aggregated or unaggregated. - switch { - case isV2Beta1ContentType(responseContentType): + // Based on the content-type server responded with: aggregated or unaggregated. + if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { return nil, nil, nil, err } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) - default: + } else { // Default is unaggregated discovery v1. err = json.Unmarshal(body, apiGroupList) if err != nil { @@ -333,26 +334,29 @@ func (d *DiscoveryClient) downloadAPIs() ( return apiGroupList, resourcesByGV, failedGVs, nil } -// isV2Beta1ContentType checks of the content-type string is both -// "application/json" and contains the v2beta1 content-type params. +// ContentTypeIsGVK checks of the content-type string is both +// "application/json" and matches the provided GVK. An error +// is returned if the content type string is malformed. // NOTE: This function is resilient to the ordering of the // content-type parameters, as well as parameters added by // intermediaries such as proxies or gateways. 
Examples: // -// "application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" = true -// "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io" = true -// "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8" = true -// "application/json" = false -// "application/json; charset=UTF-8" = false -func isV2Beta1ContentType(contentType string) bool { +// ("application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json", any GVK) = (false, nil) +// ("application/json; charset=UTF-8", any GVK) = (false, nil) +// ("malformed content type string", any GVK) = (false, error) +func ContentTypeIsGVK(contentType string, gvk schema.GroupVersionKind) (bool, error) { base, params, err := mime.ParseMediaType(contentType) if err != nil { - return false + return false, err } - return runtime.ContentTypeJSON == base && - params["g"] == "apidiscovery.k8s.io" && - params["v"] == "v2beta1" && - params["as"] == "APIGroupDiscoveryList" + gvkMatch := runtime.ContentTypeJSON == base && + params["g"] == gvk.Group && + params["v"] == gvk.Version && + params["as"] == gvk.Kind + return gvkMatch, nil } // ServerGroups returns the supported groups, with information like supported versions and the diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 9eecbb2a..6345f2fb 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -42,6 +42,7 @@ import ( batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" certificatesv1 "k8s.io/client-go/kubernetes/typed/certificates/v1" + certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" @@ -66,7 +67,7 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - resourcev1alpha1 "k8s.io/client-go/kubernetes/typed/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" @@ -99,6 +100,7 @@ type Interface interface { BatchV1beta1() batchv1beta1.BatchV1beta1Interface CertificatesV1() certificatesv1.CertificatesV1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface @@ -122,7 +124,7 @@ type Interface interface { RbacV1() rbacv1.RbacV1Interface RbacV1beta1() 
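A short sketch of calling the exported ContentTypeIsGVK helper introduced above, matching a response header against the aggregated-discovery GVK (the response variable is illustrative):

package example

import (
	"net/http"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
)

func isAggregatedDiscovery(resp *http.Response) (bool, error) {
	gvk := schema.GroupVersionKind{
		Group:   "apidiscovery.k8s.io",
		Version: "v2beta1",
		Kind:    "APIGroupDiscoveryList",
	}
	// Returns (false, err) only when the content-type string is malformed.
	return discovery.ContentTypeIsGVK(resp.Header.Get("Content-Type"), gvk)
}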
rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface - ResourceV1alpha1() resourcev1alpha1.ResourceV1alpha1Interface + ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface @@ -154,6 +156,7 @@ type Clientset struct { batchV1beta1 *batchv1beta1.BatchV1beta1Client certificatesV1 *certificatesv1.CertificatesV1Client certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client + certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client @@ -177,7 +180,7 @@ type Clientset struct { rbacV1 *rbacv1.RbacV1Client rbacV1beta1 *rbacv1beta1.RbacV1beta1Client rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - resourceV1alpha1 *resourcev1alpha1.ResourceV1alpha1Client + resourceV1alpha2 *resourcev1alpha2.ResourceV1alpha2Client schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client schedulingV1 *schedulingv1.SchedulingV1Client @@ -286,6 +289,11 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return c.certificatesV1beta1 } +// CertificatesV1alpha1 retrieves the CertificatesV1alpha1Client +func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface { + return c.certificatesV1alpha1 +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 @@ -401,9 +409,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return c.rbacV1alpha1 } -// ResourceV1alpha1 retrieves the ResourceV1alpha1Client -func (c *Clientset) ResourceV1alpha1() resourcev1alpha1.ResourceV1alpha1Interface { - return c.resourceV1alpha1 +// ResourceV1alpha2 retrieves the ResourceV1alpha2Client +func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { + return c.resourceV1alpha2 } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client @@ -560,6 +568,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.certificatesV1alpha1, err = certificatesv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -652,7 +664,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.resourceV1alpha1, err = resourcev1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.resourceV1alpha2, err = resourcev1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -721,6 +733,7 @@ func New(c rest.Interface) *Clientset { cs.batchV1beta1 = batchv1beta1.New(c) cs.certificatesV1 = certificatesv1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) + cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) @@ -744,7 +757,7 @@ func New(c rest.Interface) *Clientset { cs.rbacV1 = 
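For clientset callers, the rename above means switching from ResourceV1alpha1() to ResourceV1alpha2(); a minimal sketch of a migrated call site (the namespace and list call are illustrative):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func listResourceClaims(ctx context.Context, cs kubernetes.Interface, namespace string) error {
	// Previously cs.ResourceV1alpha1(); the typed client now serves resource.k8s.io/v1alpha2.
	_, err := cs.ResourceV1alpha2().ResourceClaims(namespace).List(ctx, metav1.ListOptions{})
	return err
}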
rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) - cs.resourceV1alpha1 = resourcev1alpha1.New(c) + cs.resourceV1alpha2 = resourcev1alpha2.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go index b272334a..9cef4242 100644 --- a/vendor/k8s.io/client-go/kubernetes/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. +// Package kubernetes holds packages which implement a clientset for Kubernetes +// APIs. package kubernetes diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index e4378052..64d3ce2a 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -38,6 +38,7 @@ import ( batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" @@ -62,7 +63,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -100,6 +101,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ batchv1beta1.AddToScheme, certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, + certificatesv1alpha1.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -123,7 +125,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, - resourcev1alpha1.AddToScheme, + resourcev1alpha2.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go index ba827f3c..1d994b5a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -43,6 +43,7 @@ type ValidatingAdmissionPoliciesGetter interface { type ValidatingAdmissionPolicyInterface interface { Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) 
(*v1alpha1.ValidatingAdmissionPolicy, error) + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) @@ -50,6 +51,7 @@ type ValidatingAdmissionPolicyInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } @@ -132,6 +134,21 @@ func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmi return } +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Put(). + Resource("validatingadmissionpolicies"). + Name(validatingAdmissionPolicy.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicy). + Do(ctx). + Into(result) + return +} + // Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return c.client.Delete(). @@ -195,3 +212,32 @@ func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmis Into(result) return } + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + if validatingAdmissionPolicy == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(validatingAdmissionPolicy) + if err != nil { + return nil, err + } + + name := validatingAdmissionPolicy.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") + } + + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Patch(types.ApplyPatchType). + Resource("validatingadmissionpolicies"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 218cb60c..7823729e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -28,6 +28,7 @@ import ( type AuthenticationV1beta1Interface interface { RESTClient() rest.Interface + SelfSubjectReviewsGetter TokenReviewsGetter } @@ -36,6 +37,10 @@ type AuthenticationV1beta1Client struct { restClient rest.Interface } +func (c *AuthenticationV1beta1Client) SelfSubjectReviews() SelfSubjectReviewInterface { + return newSelfSubjectReviews(c) +} + func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface { return newTokenReviews(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go index 60bf15ab..527a458d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -18,4 +18,6 @@ limitations under the License. package v1beta1 +type SelfSubjectReviewExpansion interface{} + type TokenReviewExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go new file mode 100644 index 00000000..9d54826a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/authentication/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. +// A group's client should implement this interface. +type SelfSubjectReviewsGetter interface { + SelfSubjectReviews() SelfSubjectReviewInterface +} + +// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. 
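The ValidatingAdmissionPolicy client above gains UpdateStatus and ApplyStatus for the status subresource. A hedged sketch of server-side applying the status; the policy name and field manager are illustrative:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admissionregistrationv1alpha1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

func applyPolicyStatus(ctx context.Context, cs kubernetes.Interface) error {
	// Cluster-scoped resource, so the apply configuration takes only a name.
	ac := admissionregistrationv1alpha1ac.ValidatingAdmissionPolicy("example-policy")
	_, err := cs.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().
		ApplyStatus(ctx, ac, metav1.ApplyOptions{FieldManager: "example-controller"})
	return err
}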
+type SelfSubjectReviewInterface interface { + Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectReview, error) + SelfSubjectReviewExpansion +} + +// selfSubjectReviews implements SelfSubjectReviewInterface +type selfSubjectReviews struct { + client rest.Interface +} + +// newSelfSubjectReviews returns a SelfSubjectReviews +func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews { + return &selfSubjectReviews{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. +func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { + result = &v1beta1.SelfSubjectReview{} + err = c.client.Post(). + Resource("selfsubjectreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectReview). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go new file mode 100644 index 00000000..a9050af9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/certificates/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CertificatesV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterTrustBundlesGetter +} + +// CertificatesV1alpha1Client is used to interact with features provided by the certificates.k8s.io group. +type CertificatesV1alpha1Client struct { + restClient rest.Interface +} + +func (c *CertificatesV1alpha1Client) ClusterTrustBundles() ClusterTrustBundleInterface { + return newClusterTrustBundles(c) +} + +// NewForConfig creates a new CertificatesV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CertificatesV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CertificatesV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *CertificatesV1alpha1Client { + return &CertificatesV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go new file mode 100644 index 00000000..970fb15e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/certificates/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterTrustBundlesGetter has a method to return a ClusterTrustBundleInterface. +// A group's client should implement this interface. +type ClusterTrustBundlesGetter interface { + ClusterTrustBundles() ClusterTrustBundleInterface +} + +// ClusterTrustBundleInterface has methods to work with ClusterTrustBundle resources. 
+type ClusterTrustBundleInterface interface { + Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*v1alpha1.ClusterTrustBundle, error) + Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*v1alpha1.ClusterTrustBundle, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTrustBundle, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTrustBundleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) + Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) + ClusterTrustBundleExpansion +} + +// clusterTrustBundles implements ClusterTrustBundleInterface +type clusterTrustBundles struct { + client rest.Interface +} + +// newClusterTrustBundles returns a ClusterTrustBundles +func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles { + return &clusterTrustBundles{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. +func (c *clusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Get(). + Resource("clustertrustbundles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. +func (c *clusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterTrustBundleList{} + err = c.client.Get(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterTrustBundles. +func (c *clusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. +func (c *clusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Post(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(clusterTrustBundle). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. +func (c *clusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Put(). + Resource("clustertrustbundles"). + Name(clusterTrustBundle.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterTrustBundle). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterTrustBundle and deletes it. Returns an error if one occurs. +func (c *clusterTrustBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustertrustbundles"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustertrustbundles"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterTrustBundle. +func (c *clusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Patch(pt). + Resource("clustertrustbundles"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterTrustBundle. +func (c *clusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + if clusterTrustBundle == nil { + return nil, fmt.Errorf("clusterTrustBundle provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(clusterTrustBundle) + if err != nil { + return nil, err + } + name := clusterTrustBundle.Name + if name == nil { + return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") + } + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Patch(types.ApplyPatchType). + Resource("clustertrustbundles"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go similarity index 88% rename from vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go index ee865e56..43cc534b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go @@ -16,5 +16,6 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -// This package has the automatically generated clientset. -package clientset +package v1alpha1 + +type ClusterTrustBundleExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 464fff91..562f8d5e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -82,8 +82,7 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // It returns the copy of the event that the server returns, or an error. // The namespace and name of the target event is deduced from the event. // The namespace must either match this event client's namespace, or this event client must -// -// have been created with the "" namespace. +// have been created with the "" namespace. 
func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { if e.ns != "" && event.Namespace != e.ns { return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 827b514d..4725d2cd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -32,7 +32,6 @@ type ExtensionsV1beta1Interface interface { DeploymentsGetter IngressesGetter NetworkPoliciesGetter - PodSecurityPoliciesGetter ReplicaSetsGetter } @@ -57,10 +56,6 @@ func (c *ExtensionsV1beta1Client) NetworkPolicies(namespace string) NetworkPolic return newNetworkPolicies(c, namespace) } -func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index 41d28f04..67fcf499 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -24,6 +24,4 @@ type IngressExpansion interface{} type NetworkPolicyExpansion interface{} -type PodSecurityPolicyExpansion interface{} - type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 3f38c313..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
-type PodSecurityPolicyInterface interface { - Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) - Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. -func (c *podSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("podsecuritypolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go index ab41abb7..9c2979d6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go @@ -19,3 +19,5 @@ limitations under the License. 
package v1alpha1 type ClusterCIDRExpansion interface{} + +type IPAddressExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go new file mode 100644 index 00000000..fff193d6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// IPAddressesGetter has a method to return a IPAddressInterface. +// A group's client should implement this interface. +type IPAddressesGetter interface { + IPAddresses() IPAddressInterface +} + +// IPAddressInterface has methods to work with IPAddress resources. +type IPAddressInterface interface { + Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (*v1alpha1.IPAddress, error) + Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (*v1alpha1.IPAddress, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAddressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) + IPAddressExpansion +} + +// iPAddresses implements IPAddressInterface +type iPAddresses struct { + client rest.Interface +} + +// newIPAddresses returns a IPAddresses +func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses { + return &iPAddresses{ + client: c.RESTClient(), + } +} + +// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. +func (c *iPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Get(). + Resource("ipaddresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. 
+func (c *iPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.IPAddressList{} + err = c.client.Get(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested iPAddresses. +func (c *iPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *iPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Post(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAddress). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *iPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Put(). + Resource("ipaddresses"). + Name(iPAddress.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAddress). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. +func (c *iPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("ipaddresses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *iPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("ipaddresses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched iPAddress. +func (c *iPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Patch(pt). + Resource("ipaddresses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. 
+func (c *iPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) { + if iPAddress == nil { + return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(iPAddress) + if err != nil { + return nil, err + } + name := iPAddress.Name + if name == nil { + return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") + } + result = &v1alpha1.IPAddress{} + err = c.client.Patch(types.ApplyPatchType). + Resource("ipaddresses"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go index ccb59331..884c846f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -29,6 +29,7 @@ import ( type NetworkingV1alpha1Interface interface { RESTClient() rest.Interface ClusterCIDRsGetter + IPAddressesGetter } // NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group. @@ -40,6 +41,10 @@ func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface { return newClusterCIDRs(c) } +func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface { + return newIPAddresses(c) +} + // NewForConfig creates a new NetworkingV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go deleted file mode 100644 index e163a845..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "k8s.io/api/resource/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSchedulingsGetter has a method to return a PodSchedulingInterface. -// A group's client should implement this interface. -type PodSchedulingsGetter interface { - PodSchedulings(namespace string) PodSchedulingInterface -} - -// PodSchedulingInterface has methods to work with PodScheduling resources. 
-type PodSchedulingInterface interface { - Create(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.CreateOptions) (*v1alpha1.PodScheduling, error) - Update(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (*v1alpha1.PodScheduling, error) - UpdateStatus(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (*v1alpha1.PodScheduling, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodScheduling, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodSchedulingList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodScheduling, err error) - Apply(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) - ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) - PodSchedulingExpansion -} - -// podSchedulings implements PodSchedulingInterface -type podSchedulings struct { - client rest.Interface - ns string -} - -// newPodSchedulings returns a PodSchedulings -func newPodSchedulings(c *ResourceV1alpha1Client, namespace string) *podSchedulings { - return &podSchedulings{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podScheduling, and returns the corresponding podScheduling object, and an error if there is any. -func (c *podSchedulings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSchedulings that match those selectors. -func (c *podSchedulings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodSchedulingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PodSchedulingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSchedulings. -func (c *podSchedulings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podScheduling and creates it. Returns the server's representation of the podScheduling, and an error, if there is any. 
-func (c *podSchedulings) Create(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.CreateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podScheduling). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podScheduling and updates it. Returns the server's representation of the podScheduling, and an error, if there is any. -func (c *podSchedulings) Update(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulings"). - Name(podScheduling.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podScheduling). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podSchedulings) UpdateStatus(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulings"). - Name(podScheduling.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podScheduling). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podScheduling and deletes it. Returns an error if one occurs. -func (c *podSchedulings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSchedulings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podScheduling. -func (c *podSchedulings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podschedulings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podScheduling. 
-func (c *podSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) { - if podScheduling == nil { - return nil, fmt.Errorf("podScheduling provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podScheduling) - if err != nil { - return nil, err - } - name := podScheduling.Name - if name == nil { - return nil, fmt.Errorf("podScheduling.Name must be provided to Apply") - } - result = &v1alpha1.PodScheduling{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podSchedulings) ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) { - if podScheduling == nil { - return nil, fmt.Errorf("podScheduling provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podScheduling) - if err != nil { - return nil, err - } - - name := podScheduling.Name - if name == nil { - return nil, fmt.Errorf("podScheduling.Name must be provided to Apply") - } - - result = &v1alpha1.PodScheduling{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulings"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go new file mode 100644 index 00000000..baaf2d98 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go similarity index 92% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go index df88c2f9..2c02e9ce 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go @@ -16,9 +16,9 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 -type PodSchedulingExpansion interface{} +type PodSchedulingContextExpansion interface{} type ResourceClaimExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go new file mode 100644 index 00000000..72e81a29 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go @@ -0,0 +1,256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. +// A group's client should implement this interface. +type PodSchedulingContextsGetter interface { + PodSchedulingContexts(namespace string) PodSchedulingContextInterface +} + +// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. 
+type PodSchedulingContextInterface interface { + Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha2.PodSchedulingContext, error) + Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) + UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodSchedulingContext, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingContextList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) + Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) + ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) + PodSchedulingContextExpansion +} + +// podSchedulingContexts implements PodSchedulingContextInterface +type podSchedulingContexts struct { + client rest.Interface + ns string +} + +// newPodSchedulingContexts returns a PodSchedulingContexts +func newPodSchedulingContexts(c *ResourceV1alpha2Client, namespace string) *podSchedulingContexts { + return &podSchedulingContexts{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. +func (c *podSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. +func (c *podSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.PodSchedulingContextList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSchedulingContexts. +func (c *podSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. +func (c *podSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. +func (c *podSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(podSchedulingContext.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *podSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(podSchedulingContext.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. +func (c *podSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched podSchedulingContext. +func (c *podSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. 
+func (c *podSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { + if podSchedulingContext == nil { + return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(podSchedulingContext) + if err != nil { + return nil, err + } + name := podSchedulingContext.Name + if name == nil { + return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") + } + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *podSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { + if podSchedulingContext == nil { + return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(podSchedulingContext) + if err != nil { + return nil, err + } + + name := podSchedulingContext.Name + if name == nil { + return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") + } + + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go similarity index 66% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go index 2355bf7c..d5795fd6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go @@ -16,49 +16,49 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "net/http" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type ResourceV1alpha1Interface interface { +type ResourceV1alpha2Interface interface { RESTClient() rest.Interface - PodSchedulingsGetter + PodSchedulingContextsGetter ResourceClaimsGetter ResourceClaimTemplatesGetter ResourceClassesGetter } -// ResourceV1alpha1Client is used to interact with features provided by the resource.k8s.io group. -type ResourceV1alpha1Client struct { +// ResourceV1alpha2Client is used to interact with features provided by the resource.k8s.io group. 
+type ResourceV1alpha2Client struct { restClient rest.Interface } -func (c *ResourceV1alpha1Client) PodSchedulings(namespace string) PodSchedulingInterface { - return newPodSchedulings(c, namespace) +func (c *ResourceV1alpha2Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { + return newPodSchedulingContexts(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClaims(namespace string) ResourceClaimInterface { +func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface { return newResourceClaims(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { +func (c *ResourceV1alpha2Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { return newResourceClaimTemplates(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClasses() ResourceClassInterface { +func (c *ResourceV1alpha2Client) ResourceClasses() ResourceClassInterface { return newResourceClasses(c) } -// NewForConfig creates a new ResourceV1alpha1Client for the given config. +// NewForConfig creates a new ResourceV1alpha2Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*ResourceV1alpha1Client, error) { +func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -70,9 +70,9 @@ func NewForConfig(c *rest.Config) (*ResourceV1alpha1Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new ResourceV1alpha1Client for the given config and http client. +// NewForConfigAndClient creates a new ResourceV1alpha2Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha1Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -81,12 +81,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha1Cli if err != nil { return nil, err } - return &ResourceV1alpha1Client{client}, nil + return &ResourceV1alpha2Client{client}, nil } -// NewForConfigOrDie creates a new ResourceV1alpha1Client for the given config and +// NewForConfigOrDie creates a new ResourceV1alpha2Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -94,13 +94,13 @@ func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha1Client { return client } -// New creates a new ResourceV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *ResourceV1alpha1Client { - return &ResourceV1alpha1Client{c} +// New creates a new ResourceV1alpha2Client for the given RESTClient. 
+func New(c rest.Interface) *ResourceV1alpha2Client { + return &ResourceV1alpha2Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := v1alpha2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -114,7 +114,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *ResourceV1alpha1Client) RESTClient() rest.Interface { +func (c *ResourceV1alpha2Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go similarity index 79% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go index cd2d0c78..cfb27c9d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type ResourceClaimsGetter interface { // ResourceClaimInterface has methods to work with ResourceClaim resources. 
type ResourceClaimInterface interface { - Create(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.CreateOptions) (*v1alpha1.ResourceClaim, error) - Update(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (*v1alpha1.ResourceClaim, error) - UpdateStatus(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (*v1alpha1.ResourceClaim, error) + Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (*v1alpha2.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) + UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClaimList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaim, err error) - Apply(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) - ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) + ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) ResourceClaimExpansion } @@ -62,7 +62,7 @@ type resourceClaims struct { } // newResourceClaims returns a ResourceClaims -func newResourceClaims(c *ResourceV1alpha1Client, namespace string) *resourceClaims { +func newResourceClaims(c *ResourceV1alpha2Client, namespace string) *resourceClaims { return &resourceClaims{ client: c.RESTClient(), ns: namespace, @@ -70,8 +70,8 @@ func newResourceClaims(c *ResourceV1alpha1Client, namespace string) *resourceCla } // Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaims"). 
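The generated v1alpha2 ResourceClaim client above carries the standard client-gen verb set (Get/List/Watch/Create/Update/Patch/Apply). A minimal sketch of reading claims through the renamed group client, assuming a cluster that already serves resource.k8s.io/v1alpha2; the kubeconfig path and namespace are illustrative placeholders, not part of this patch:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a *rest.Config from a kubeconfig file (path is illustrative).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// ResourceV1alpha2() replaces the former ResourceV1alpha1() accessor after this bump.
	claims, err := clientset.ResourceV1alpha2().ResourceClaims("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, claim := range claims.Items {
		fmt.Println(claim.Name)
	}
}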
@@ -83,12 +83,12 @@ func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOpt } // List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClaimList, err error) { +func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClaimList{} + result = &v1alpha2.ResourceClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaims"). @@ -115,8 +115,8 @@ func (c *resourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.CreateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Post(). Namespace(c.ns). Resource("resourceclaims"). @@ -128,8 +128,8 @@ func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha1.Res } // Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaims"). @@ -143,8 +143,8 @@ func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha1.Res // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaims"). @@ -185,8 +185,8 @@ func (c *resourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOpt } // Patch applies the patch and returns the patched resourceClaim. 
-func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourceclaims"). @@ -200,7 +200,7 @@ func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchT } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. -func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -213,7 +213,7 @@ func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alp if name == nil { return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaim{} + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaims"). @@ -227,7 +227,7 @@ func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alp // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -242,7 +242,7 @@ func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourc return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaim{} + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaims"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go similarity index 80% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go index b6cc3d96..3f4e3200 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,15 +41,15 @@ type ResourceClaimTemplatesGetter interface { // ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. type ResourceClaimTemplateInterface interface { - Create(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha1.ResourceClaimTemplate, error) - Update(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha1.ResourceClaimTemplate, error) + Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha2.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimTemplate, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClaimTemplate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClaimTemplateList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimTemplateList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaimTemplate, err error) - Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaimTemplate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) ResourceClaimTemplateExpansion } @@ -60,7 +60,7 @@ type resourceClaimTemplates struct { } // newResourceClaimTemplates returns a ResourceClaimTemplates -func newResourceClaimTemplates(c *ResourceV1alpha1Client, namespace string) *resourceClaimTemplates { +func newResourceClaimTemplates(c *ResourceV1alpha2Client, namespace string) *resourceClaimTemplates { return &resourceClaimTemplates{ client: c.RESTClient(), ns: namespace, @@ -68,8 +68,8 @@ func newResourceClaimTemplates(c *ResourceV1alpha1Client, namespace string) *res } // Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. 
-func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -81,12 +81,12 @@ func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v } // List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. -func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClaimTemplateList, err error) { +func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClaimTemplateList{} + result = &v1alpha2.ResourceClaimTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -113,8 +113,8 @@ func (c *resourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) } // Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Post(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -126,8 +126,8 @@ func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTempla } // Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -167,8 +167,8 @@ func (c *resourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.D } // Patch applies the patch and returns the patched resourceClaimTemplate. 
-func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -182,7 +182,7 @@ func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt type } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { +func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { if resourceClaimTemplate == nil { return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") } @@ -195,7 +195,7 @@ func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplat if name == nil { return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaimTemplate{} + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaimtemplates"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go similarity index 80% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go index 9c8b4546..95a4ac56 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,15 +41,15 @@ type ResourceClassesGetter interface { // ResourceClassInterface has methods to work with ResourceClass resources. 
type ResourceClassInterface interface { - Create(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.CreateOptions) (*v1alpha1.ResourceClass, error) - Update(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.UpdateOptions) (*v1alpha1.ResourceClass, error) + Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (*v1alpha2.ResourceClass, error) + Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (*v1alpha2.ResourceClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClass, err error) - Apply(ctx context.Context, resourceClass *resourcev1alpha1.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) + Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) ResourceClassExpansion } @@ -59,15 +59,15 @@ type resourceClasses struct { } // newResourceClasses returns a ResourceClasses -func newResourceClasses(c *ResourceV1alpha1Client) *resourceClasses { +func newResourceClasses(c *ResourceV1alpha2Client) *resourceClasses { return &resourceClasses{ client: c.RESTClient(), } } // Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Get(). Resource("resourceclasses"). Name(name). @@ -78,12 +78,12 @@ func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOp } // List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClassList, err error) { +func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClassList{} + result = &v1alpha2.ResourceClassList{} err = c.client.Get(). Resource("resourceclasses"). VersionedParams(&opts, scheme.ParameterCodec). 
@@ -108,8 +108,8 @@ func (c *resourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.CreateOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Post(). Resource("resourceclasses"). VersionedParams(&opts, scheme.ParameterCodec). @@ -120,8 +120,8 @@ func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha1.Re } // Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.UpdateOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Put(). Resource("resourceclasses"). Name(resourceClass.Name). @@ -158,8 +158,8 @@ func (c *resourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOp } // Patch applies the patch and returns the patched resourceClass. -func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Patch(pt). Resource("resourceclasses"). Name(name). @@ -172,7 +172,7 @@ func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.Patch } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. -func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha1.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClass, err error) { +func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { if resourceClass == nil { return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") } @@ -185,7 +185,7 @@ func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1al if name == nil { return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") } - result = &v1alpha1.ResourceClass{} + result = &v1alpha2.ResourceClass{} err = c.client.Patch(types.ApplyPatchType). Resource("resourceclasses"). Name(*name). 
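Beyond the CRUD verbs, each renamed client also exposes the server-side apply path generated above (Apply built on types.ApplyPatchType). A sketch of applying a ResourceClass through the apply configurations in k8s.io/client-go/applyconfigurations/resource/v1alpha2; the class name, driver name, and field manager are placeholders, and the WithDriverName setter is assumed from the generated apply configuration for the v1alpha2 DriverName field:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func applyClass(ctx context.Context, cfg *rest.Config) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Declare only the fields this manager owns; the server merges them via
	// an apply patch, exactly as the generated Apply method above encodes it.
	class := resourcev1alpha2.ResourceClass("example-class").
		WithDriverName("example.driver.k8s.io")
	_, err = clientset.ResourceV1alpha2().ResourceClasses().Apply(ctx, class,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}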
diff --git a/vendor/k8s.io/client-go/openapi/OWNERS b/vendor/k8s.io/client-go/openapi/OWNERS new file mode 100644 index 00000000..e6100942 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index 51e34dda..676d51d3 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -43,7 +43,8 @@ var ( gitMinor string = "" // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see - // https://git.k8s.io/community/contributors/design-proposals/release/versioning.md + // https://github.com/kubernetes/sig-release/blob/master/release-engineering/versioning.md#kubernetes-release-versioning + // https://kubernetes.io/releases/version-skew-policy/ // for a detailed discussion of this field) // // TODO: This field is still called "gitVersion" for legacy diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go index 2cf821bc..60df7e56 100644 --- a/vendor/k8s.io/client-go/rest/client.go +++ b/vendor/k8s.io/client-go/rest/client.go @@ -52,8 +52,7 @@ type Interface interface { // ClientContentConfig controls how RESTClient communicates with the server. // // TODO: ContentConfig will be updated to accept a Negotiator instead of a -// -// NegotiatedSerializer and NegotiatedSerializer will be removed. +// NegotiatedSerializer and NegotiatedSerializer will be removed. type ClientContentConfig struct { // AcceptContentTypes specifies the types the client will accept and is optional. // If not set, ContentType will be used to define the Accept header diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 96e72569..bb6fb4de 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -481,7 +481,13 @@ func (r *Request) Body(obj interface{}) *Request { return r } -// URL returns the current working URL. +// Error returns any error encountered constructing the request, if any. +func (r *Request) Error() error { + return r.err +} + +// URL returns the current working URL. Check the result of Error() to ensure +// that the returned URL is valid. func (r *Request) URL() *url.URL { p := r.pathPrefix if r.namespaceSet && len(r.namespace) > 0 { @@ -726,7 +732,6 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) retry.After(ctx, r, resp, err) if err == nil && resp.StatusCode == http.StatusOK { return r.newStreamWatcher(resp) @@ -786,22 +791,36 @@ func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) ), nil } -// updateURLMetrics is a convenience function for pushing metrics. -// It also handles corner cases for incomplete/invalid request data. -func updateURLMetrics(ctx context.Context, req *Request, resp *http.Response, err error) { - url := "none" +// updateRequestResultMetric increments the RequestResult metric counter, +// it should be called with the (response, err) tuple from the final +// reply from the server. 
+func updateRequestResultMetric(ctx context.Context, req *Request, resp *http.Response, err error) { + code, host := sanitize(req, resp, err) + metrics.RequestResult.Increment(ctx, code, req.verb, host) +} + +// updateRequestRetryMetric increments the RequestRetry metric counter, +// it should be called with the (response, err) tuple for each retry +// except for the final attempt. +func updateRequestRetryMetric(ctx context.Context, req *Request, resp *http.Response, err error) { + code, host := sanitize(req, resp, err) + metrics.RequestRetry.IncrementRetry(ctx, code, req.verb, host) +} + +func sanitize(req *Request, resp *http.Response, err error) (string, string) { + host := "none" if req.c.base != nil { - url = req.c.base.Host + host = req.c.base.Host } // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric // system so we just report them as ``. - if err != nil { - metrics.RequestResult.Increment(ctx, "", req.verb, url) - } else { - // Metrics for failure codes - metrics.RequestResult.Increment(ctx, strconv.Itoa(resp.StatusCode), req.verb, url) + code := "" + if resp != nil { + code = strconv.Itoa(resp.StatusCode) } + + return code, host } // Stream formats and executes the request, and offers streaming of the response. @@ -834,7 +853,6 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { return nil, err } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) retry.After(ctx, r, resp, err) if err != nil { // we only retry on an HTTP response with 'Retry-After' header @@ -979,7 +997,6 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp return err } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) // The value -1 or a value of 0 with a non-nil Body indicates that the length is unknown. // https://pkg.go.dev/net/http#Request if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) { diff --git a/vendor/k8s.io/client-go/rest/with_retry.go b/vendor/k8s.io/client-go/rest/with_retry.go index 207060a5..eaaadc6a 100644 --- a/vendor/k8s.io/client-go/rest/with_retry.go +++ b/vendor/k8s.io/client-go/rest/with_retry.go @@ -242,8 +242,20 @@ func (r *withRetry) After(ctx context.Context, request *Request, resp *http.Resp // parameters calculated from the (response, err) tuple from // attempt N-1, so r.retryAfter is outdated and should not be // referred to here. + isRetry := r.retryAfter != nil r.retryAfter = nil + // the client finishes a single request after N attempts (1..N) + // - all attempts (1..N) are counted to the rest_client_requests_total + // metric (current behavior). + // - every attempt after the first (2..N) are counted to the + // rest_client_request_retries_total metric. 
+ updateRequestResultMetric(ctx, request, resp, err) + if isRetry { + // this is attempt 2 or later + updateRequestRetryMetric(ctx, request, resp, err) + } + if request.c.base != nil { if err != nil { request.backoff.UpdateBackoff(request.URL(), err, 0) @@ -346,8 +358,12 @@ func retryAfterResponse() *http.Response { } func retryAfterResponseWithDelay(delay string) *http.Response { + return retryAfterResponseWithCodeAndDelay(http.StatusInternalServerError, delay) +} + +func retryAfterResponseWithCodeAndDelay(code int, delay string) *http.Response { return &http.Response{ - StatusCode: http.StatusInternalServerError, + StatusCode: code, Header: http.Header{"Retry-After": []string{delay}}, } } diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 96005ff5..f437f286 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -18,6 +18,7 @@ package cache import ( "errors" + "os" "sync" "time" @@ -50,11 +51,12 @@ type Config struct { Process ProcessFunc // ObjectType is an example object of the type this controller is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // expected to handle. ObjectType runtime.Object + // ObjectDescription is the description to use when logging type-specific information about this controller. + ObjectDescription string + // FullResyncPeriod is the period at which ShouldResync is considered. FullResyncPeriod time.Duration @@ -84,7 +86,7 @@ type Config struct { type ShouldResyncFunc func() bool // ProcessFunc processes a single object. -type ProcessFunc func(obj interface{}) error +type ProcessFunc func(obj interface{}, isInInitialList bool) error // `*controller` implements Controller type controller struct { @@ -131,18 +133,24 @@ func (c *controller) Run(stopCh <-chan struct{}) { <-stopCh c.config.Queue.Close() }() - r := NewReflector( + r := NewReflectorWithOptions( c.config.ListerWatcher, c.config.ObjectType, c.config.Queue, - c.config.FullResyncPeriod, + ReflectorOptions{ + ResyncPeriod: c.config.FullResyncPeriod, + TypeDescription: c.config.ObjectDescription, + Clock: c.clock, + }, ) r.ShouldResync = c.config.ShouldResync r.WatchListPageSize = c.config.WatchListPageSize - r.clock = c.clock if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler } + if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 { + r.UseWatchList = true + } c.reflectorMutex.Lock() c.reflector = r @@ -211,7 +219,7 @@ func (c *controller) processLoop() { // happen if the watch is closed and misses the delete event and we don't // notice the deletion until the subsequent re-list. type ResourceEventHandler interface { - OnAdd(obj interface{}) + OnAdd(obj interface{}, isInInitialList bool) OnUpdate(oldObj, newObj interface{}) OnDelete(obj interface{}) } @@ -220,6 +228,9 @@ type ResourceEventHandler interface { // as few of the notification functions as you want while still implementing // ResourceEventHandler. This adapter does not remove the prohibition against // modifying the objects. +// +// See ResourceEventHandlerDetailedFuncs if your use needs to propagate +// HasSynced. type ResourceEventHandlerFuncs struct { AddFunc func(obj interface{}) UpdateFunc func(oldObj, newObj interface{}) @@ -227,7 +238,7 @@ type ResourceEventHandlerFuncs struct { } // OnAdd calls AddFunc if it's not nil. 
-func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) { +func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}, isInInitialList bool) { if r.AddFunc != nil { r.AddFunc(obj) } @@ -247,6 +258,36 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { } } +// ResourceEventHandlerDetailedFuncs is exactly like ResourceEventHandlerFuncs +// except its AddFunc accepts the isInInitialList parameter, for propagating +// HasSynced. +type ResourceEventHandlerDetailedFuncs struct { + AddFunc func(obj interface{}, isInInitialList bool) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnAdd(obj interface{}, isInInitialList bool) { + if r.AddFunc != nil { + r.AddFunc(obj, isInInitialList) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + // FilteringResourceEventHandler applies the provided filter to all events coming // in, ensuring the appropriate nested handler method is invoked. An object // that starts passing the filter after an update is considered an add, and an @@ -258,11 +299,11 @@ type FilteringResourceEventHandler struct { } // OnAdd calls the nested handler only if the filter succeeds -func (r FilteringResourceEventHandler) OnAdd(obj interface{}) { +func (r FilteringResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) { if !r.FilterFunc(obj) { return } - r.Handler.OnAdd(obj) + r.Handler.OnAdd(obj, isInInitialList) } // OnUpdate ensures the proper handler is called depending on whether the filter matches @@ -273,7 +314,7 @@ func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { case newer && older: r.Handler.OnUpdate(oldObj, newObj) case newer && !older: - r.Handler.OnAdd(newObj) + r.Handler.OnAdd(newObj, false) case !newer && older: r.Handler.OnDelete(oldObj) default: @@ -401,6 +442,7 @@ func processDeltas( handler ResourceEventHandler, clientState Store, deltas Deltas, + isInInitialList bool, ) error { // from oldest to newest for _, d := range deltas { @@ -417,7 +459,7 @@ func processDeltas( if err := clientState.Add(obj); err != nil { return err } - handler.OnAdd(obj) + handler.OnAdd(obj, isInInitialList) } case Deleted: if err := clientState.Delete(obj); err != nil { @@ -466,9 +508,9 @@ func newInformer( FullResyncPeriod: resyncPeriod, RetryOnError: false, - Process: func(obj interface{}) error { + Process: func(obj interface{}, isInInitialList bool) error { if deltas, ok := obj.(Deltas); ok { - return processDeltas(h, clientState, deltas) + return processDeltas(h, clientState, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") }, diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 84f3ab9c..7160bb1e 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -300,6 +300,10 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *DeltaFIFO) hasSynced_locked() bool { return f.populated && 
f.initialPopulationCount == 0 } @@ -570,6 +574,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] depth := len(f.queue) @@ -595,7 +600,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"}) defer trace.LogIfLong(100 * time.Millisecond) } - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 8f331378..dd13c4ea 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -25,7 +25,7 @@ import ( // PopProcessFunc is passed to Pop() method of Queue interface. // It is supposed to process the accumulator popped from the queue. -type PopProcessFunc func(interface{}) error +type PopProcessFunc func(obj interface{}, isInInitialList bool) error // ErrRequeue may be returned by a PopProcessFunc to safely requeue // the current item. The value of Err will be returned from Pop. @@ -82,9 +82,12 @@ type Queue interface { // Pop is helper function for popping from Queue. // WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. +// +// NOTE: This function is deprecated and may be removed in the future without +// additional warning. func Pop(queue Queue) interface{} { var result interface{} - queue.Pop(func(obj interface{}) error { + queue.Pop(func(obj interface{}, isInInitialList bool) error { result = obj return nil }) @@ -149,6 +152,10 @@ func (f *FIFO) Close() { func (f *FIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *FIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -287,6 +294,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] if f.initialPopulationCount > 0 { @@ -298,7 +306,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { continue } delete(f.items, id) - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 9cd476be..2b335c10 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -23,6 +23,7 @@ import ( "io" "math/rand" "reflect" + "strings" "sync" "time" @@ -40,6 +41,7 @@ import ( "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" "k8s.io/utils/clock" + "k8s.io/utils/pointer" "k8s.io/utils/trace" ) @@ -49,12 +51,11 @@ const defaultExpectedTypeName = "" type Reflector struct { // name identifies this reflector. By default it will be a file:line if possible. name string - // The name of the type we expect to place in the store. The name // will be the stringification of expectedGVK if provided, and the // stringification of expectedType otherwise. It is for display // only, and should not be used for parsing or comparison. - expectedTypeName string + typeDescription string // An example object of the type we expect to place in the store. 
// Only the type needs to be right, except that when that is // `unstructured.Unstructured` the object's `"apiVersion"` and @@ -66,17 +67,11 @@ type Reflector struct { store Store // listerWatcher is used to perform lists and watches. listerWatcher ListerWatcher - // backoff manages backoff of ListWatch backoffManager wait.BackoffManager // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch. initConnBackoffManager wait.BackoffManager - // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch. - MaxInternalErrorRetryDuration time.Duration - - resyncPeriod time.Duration - // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked - ShouldResync func() bool + resyncPeriod time.Duration // clock allows tests to manipulate time clock clock.Clock // paginatedResult defines whether pagination should be forced for list calls. @@ -91,6 +86,8 @@ type Reflector struct { isLastSyncResourceVersionUnavailable bool // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex + // Called whenever the ListAndWatch drops the connection with an error. + watchErrorHandler WatchErrorHandler // WatchListPageSize is the requested chunk size of initial and resync watch lists. // If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data // (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0") @@ -99,8 +96,19 @@ type Reflector struct { // etcd, which is significantly less efficient and may lead to serious performance and // scalability problems. WatchListPageSize int64 - // Called whenever the ListAndWatch drops the connection with an error. - watchErrorHandler WatchErrorHandler + // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked + ShouldResync func() bool + // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch. + MaxInternalErrorRetryDuration time.Duration + // UseWatchList if turned on instructs the reflector to open a stream to bring data from the API server. + // Streaming has the primary advantage of using fewer server's resources to fetch data. + // + // The old behaviour establishes a LIST request which gets data in chunks. + // Paginated list is less efficient and depending on the actual size of objects + // might result in an increased memory consumption of the APIServer. + // + // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details + UseWatchList bool } // ResourceVersionUpdater is an interface that allows store implementation to @@ -131,13 +139,13 @@ func DefaultWatchErrorHandler(r *Reflector, err error) { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. 
- klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case err == io.EOF: // watch closed normally case err == io.ErrUnexpectedEOF: - klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err) + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.typeDescription, err) default: - utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err)) + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.typeDescription, err)) } } @@ -155,7 +163,40 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa return indexer, reflector } -// NewReflector creates a new Reflector object which will keep the +// NewReflector creates a new Reflector with its name defaulted to the closest source_file.go:line in the call stack +// that is outside this package. See NewReflectorWithOptions for further information. +func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{ResyncPeriod: resyncPeriod}) +} + +// NewNamedReflector creates a new Reflector with the specified name. See NewReflectorWithOptions for further +// information. +func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{Name: name, ResyncPeriod: resyncPeriod}) +} + +// ReflectorOptions configures a Reflector. +type ReflectorOptions struct { + // Name is the Reflector's name. If unset/unspecified, the name defaults to the closest source_file.go:line + // in the call stack that is outside this package. + Name string + + // TypeDescription is the Reflector's type description. If unset/unspecified, the type description is defaulted + // using the following rules: if the expectedType passed to NewReflectorWithOptions was nil, the type description is + // "". If the expectedType is an instance of *unstructured.Unstructured and its apiVersion and kind fields + // are set, the type description is the string encoding of those. Otherwise, the type description is set to the + // go type of expectedType.. + TypeDescription string + + // ResyncPeriod is the Reflector's resync period. If unset/unspecified, the resync period defaults to 0 + // (do not resync). + ResyncPeriod time.Duration + + // Clock allows tests to control time. If unset defaults to clock.RealClock{} + Clock clock.Clock +} + +// NewReflectorWithOptions creates a new Reflector object which will keep the // given store up to date with the server's contents for the given // resource. Reflector promises to only put things in the store that // have the type of expectedType, unless expectedType is nil. If @@ -165,49 +206,74 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa // "yes". This enables you to use reflectors to periodically process // everything as well as incrementally processing the things that // change. 
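The new ReflectorOptions / NewReflectorWithOptions surface replaces the positional arguments of the older constructors shown next. A minimal sketch of wiring a reflector for Pods with an explicit name, type description, and resync period; the clientset argument and namespace are assumed to exist in the caller, and all values are purely illustrative:

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func runPodReflector(clientset kubernetes.Interface, stopCh <-chan struct{}) {
	// ListerWatcher for Pods in the "default" namespace (namespace is illustrative).
	lw := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", "default", fields.Everything())
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	// Options mirror what the controller Config now passes through:
	// resync period, type description, and (in tests) a custom clock.
	r := cache.NewReflectorWithOptions(lw, &v1.Pod{}, store, cache.ReflectorOptions{
		Name:            "pod-reflector",
		TypeDescription: "v1.Pod",
		ResyncPeriod:    30 * time.Second,
	})
	r.Run(stopCh) // blocks until stopCh is closed
}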
-func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) -} - -// NewNamedReflector same as NewReflector, but with a specified name for logging -func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - realClock := &clock.RealClock{} +func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store Store, options ReflectorOptions) *Reflector { + reflectorClock := options.Clock + if reflectorClock == nil { + reflectorClock = clock.RealClock{} + } r := &Reflector{ - name: name, - listerWatcher: lw, - store: store, + name: options.Name, + resyncPeriod: options.ResyncPeriod, + typeDescription: options.TypeDescription, + listerWatcher: lw, + store: store, // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. - backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - resyncPeriod: resyncPeriod, - clock: realClock, + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + clock: reflectorClock, watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + expectedType: reflect.TypeOf(expectedType), } - r.setExpectedType(expectedType) + + if r.name == "" { + r.name = naming.GetNameFromCallsite(internalPackages...) + } + + if r.typeDescription == "" { + r.typeDescription = getTypeDescriptionFromObject(expectedType) + } + + if r.expectedGVK == nil { + r.expectedGVK = getExpectedGVKFromObject(expectedType) + } + return r } -func (r *Reflector) setExpectedType(expectedType interface{}) { - r.expectedType = reflect.TypeOf(expectedType) - if r.expectedType == nil { - r.expectedTypeName = defaultExpectedTypeName - return +func getTypeDescriptionFromObject(expectedType interface{}) string { + if expectedType == nil { + return defaultExpectedTypeName } - r.expectedTypeName = r.expectedType.String() + reflectDescription := reflect.TypeOf(expectedType).String() - if obj, ok := expectedType.(*unstructured.Unstructured); ok { - // Use gvk to check that watch event objects are of the desired type. 
- gvk := obj.GroupVersionKind() - if gvk.Empty() { - klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name) - return - } - r.expectedGVK = &gvk - r.expectedTypeName = gvk.String() + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return reflectDescription } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return reflectDescription + } + + return gvk.String() +} + +func getExpectedGVKFromObject(expectedType interface{}) *schema.GroupVersionKind { + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return nil + } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return nil + } + + return &gvk } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common @@ -218,13 +284,13 @@ var internalPackages = []string{"client-go/tools/cache/"} // objects and subsequent deltas. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) wait.BackoffUntil(func() { if err := r.ListAndWatch(stopCh); err != nil { r.watchErrorHandler(r, err) } }, r.backoffManager, true, stopCh) - klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) } var ( @@ -254,42 +320,75 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name) + klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name) + var err error + var w watch.Interface + fallbackToList := !r.UseWatchList - err := r.list(stopCh) - if err != nil { - return err + if r.UseWatchList { + w, err = r.watchList(stopCh) + if w == nil && err == nil { + // stopCh was closed + return nil + } + if err != nil { + if !apierrors.IsInvalid(err) { + return err + } + klog.Warning("the watch-list feature is not supported by the server, falling back to the previous LIST/WATCH semantic") + fallbackToList = true + // Ensure that we won't accidentally pass some garbage down the watch. + w = nil + } + } + + if fallbackToList { + err = r.list(stopCh) + if err != nil { + return err + } } resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) defer close(cancelCh) - go func() { - resyncCh, cleanup := r.resyncChan() - defer func() { - cleanup() // Call the last one written into cleanup - }() - for { - select { - case <-resyncCh: - case <-stopCh: - return - case <-cancelCh: + go r.startResync(stopCh, cancelCh, resyncerrc) + return r.watch(w, stopCh, resyncerrc) +} + +// startResync periodically calls r.store.Resync() method. +// Note that this method is blocking and should be +// called in a separate goroutine. 
+func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{}, resyncerrc chan error) { + resyncCh, cleanup := r.resyncChan() + defer func() { + cleanup() // Call the last one written into cleanup + }() + for { + select { + case <-resyncCh: + case <-stopCh: + return + case <-cancelCh: + return + } + if r.ShouldResync == nil || r.ShouldResync() { + klog.V(4).Infof("%s: forcing resync", r.name) + if err := r.store.Resync(); err != nil { + resyncerrc <- err return } - if r.ShouldResync == nil || r.ShouldResync() { - klog.V(4).Infof("%s: forcing resync", r.name) - if err := r.store.Resync(); err != nil { - resyncerrc <- err - return - } - } - cleanup() - resyncCh, cleanup = r.resyncChan() } - }() + cleanup() + resyncCh, cleanup = r.resyncChan() + } +} +// watch simply starts a watch request with the server. +func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error { + var err error retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock) + for { // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors select { @@ -298,35 +397,41 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { default: } - timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) - options := metav1.ListOptions{ - ResourceVersion: r.LastSyncResourceVersion(), - // We want to avoid situations of hanging watchers. Stop any watchers that do not - // receive any events within the timeout window. - TimeoutSeconds: &timeoutSeconds, - // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. - // Reflector doesn't assume bookmarks are returned at all (if the server do not support - // watch bookmarks, it will ignore this field). - AllowWatchBookmarks: true, - } - // start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent start := r.clock.Now() - w, err := r.listerWatcher.Watch(options) - if err != nil { - // If this is "connection refused" error, it means that most likely apiserver is not responsive. - // It doesn't make sense to re-list all objects because most likely we will be able to restart - // watch where we ended. - // If that's the case begin exponentially backing off and resend watch request. - // Do the same for "429" errors. - if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { - <-r.initConnBackoffManager.Backoff().C() - continue + + if w == nil { + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: r.LastSyncResourceVersion(), + // We want to avoid situations of hanging watchers. Stop any watchers that do not + // receive any events within the timeout window. + TimeoutSeconds: &timeoutSeconds, + // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. + // Reflector doesn't assume bookmarks are returned at all (if the server do not support + // watch bookmarks, it will ignore this field). 
+ AllowWatchBookmarks: true, + } + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if canRetry := isWatchErrorRetriable(err); canRetry { + klog.V(4).Infof("%s: watch of %v returned %v - backing off", r.name, r.typeDescription, err) + select { + case <-stopCh: + return nil + case <-r.initConnBackoffManager.Backoff().C(): + continue + } + } + return err } - return err } - err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh) + err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh) + // Ensure that watch will not be reused across iterations. + w.Stop() + w = nil retry.After(err) if err != nil { if err != errorStopRequested { @@ -335,16 +440,20 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. - klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case apierrors.IsTooManyRequests(err): - klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName) - <-r.initConnBackoffManager.Backoff().C() - continue + klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.typeDescription) + select { + case <-stopCh: + return nil + case <-r.initConnBackoffManager.Backoff().C(): + continue + } case apierrors.IsInternalError(err) && retry.ShouldRetry(): - klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.expectedTypeName, err) + klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.typeDescription, err) continue default: - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.typeDescription, err) } } return nil @@ -421,8 +530,8 @@ func (r *Reflector) list(stopCh <-chan struct{}) error { } initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err}) if err != nil { - klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err) - return fmt.Errorf("failed to list %v: %w", r.expectedTypeName, err) + klog.Warningf("%s: failed to list %v: %v", r.name, r.typeDescription, err) + return fmt.Errorf("failed to list %v: %w", r.typeDescription, err) } // We check if the list was paginated and if so set the paginatedResult based on that. @@ -460,6 +569,114 @@ func (r *Reflector) list(stopCh <-chan struct{}) error { return nil } +// watchList establishes a stream to get a consistent snapshot of data +// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal +// +// case 1: start at Most Recent (RV="", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) +// Establishes a consistent stream with the server. +// That means the returned data is consistent, as if, served directly from etcd via a quorum read. +// It begins with synthetic "Added" events of all resources up to the most recent ResourceVersion. +// It ends with a synthetic "Bookmark" event containing the most recent ResourceVersion. 
+// After receiving a "Bookmark" event the reflector is considered to be synchronized. +// It replaces its internal store with the collected items and +// reuses the current watch requests for getting further events. +// +// case 2: start at Exact (RV>"0", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) +// Establishes a stream with the server at the provided resource version. +// To establish the initial state the server begins with synthetic "Added" events. +// It ends with a synthetic "Bookmark" event containing the provided or newer resource version. +// After receiving a "Bookmark" event the reflector is considered to be synchronized. +// It replaces its internal store with the collected items and +// reuses the current watch requests for getting further events. +func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { + var w watch.Interface + var err error + var temporaryStore Store + var resourceVersion string + // TODO(#115478): see if this function could be turned + // into a method and see if error handling + // could be unified with the r.watch method + isErrorRetriableWithSideEffectsFn := func(err error) bool { + if canRetry := isWatchErrorRetriable(err); canRetry { + klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err) + <-r.initConnBackoffManager.Backoff().C() + return true + } + if isExpiredError(err) || isTooLargeResourceVersionError(err) { + // we tried to re-establish a watch request but the provided RV + // has either expired or it is greater than the server knows about. + // In that case we reset the RV and + // try to get a consistent snapshot from the watch cache (case 1) + r.setIsLastSyncResourceVersionUnavailable(true) + return true + } + return false + } + + initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name}) + defer initTrace.LogIfLong(10 * time.Second) + for { + select { + case <-stopCh: + return nil, nil + default: + } + + resourceVersion = "" + lastKnownRV := r.rewatchResourceVersion() + temporaryStore = NewStore(DeletionHandlingMetaNamespaceKeyFunc) + // TODO(#115478): large "list", slow clients, slow network, p&f + // might slow down streaming and eventually fail. + // maybe in such a case we should retry with an increased timeout? + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: lastKnownRV, + AllowWatchBookmarks: true, + SendInitialEvents: pointer.Bool(true), + ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, + TimeoutSeconds: &timeoutSeconds, + } + start := r.clock.Now() + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + bookmarkReceived := pointer.Bool(false) + err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, + func(rv string) { resourceVersion = rv }, + bookmarkReceived, + r.clock, make(chan error), stopCh) + if err != nil { + w.Stop() // stop and retry with clean state + if err == errorStopRequested { + return nil, nil + } + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + if *bookmarkReceived { + break + } + } + // We successfully got initial state from watch-list confirmed by the + // "k8s.io/initial-events-end" bookmark. 
+ initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) + r.setIsLastSyncResourceVersionUnavailable(false) + if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { + return nil, fmt.Errorf("unable to sync watch-list result: %v", err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + + return w, nil +} + // syncWith replaces the store's items with the given list. func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { found := make([]interface{}, 0, len(items)) @@ -478,15 +695,17 @@ func watchHandler(start time.Time, name string, expectedTypeName string, setLastSyncResourceVersion func(string), + exitOnInitialEventsEndBookmark *bool, clock clock.Clock, errc chan error, stopCh <-chan struct{}, ) error { eventCount := 0 - - // Stopping the watcher should be idempotent and if we return from this function there's no way - // we're coming back in with the same watch interface. - defer w.Stop() + if exitOnInitialEventsEndBookmark != nil { + // set it to false just in case somebody + // made it positive + *exitOnInitialEventsEndBookmark = false + } loop: for { @@ -541,6 +760,11 @@ loop: } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion + if _, ok := meta.GetAnnotations()["k8s.io/initial-events-end"]; ok { + if exitOnInitialEventsEndBookmark != nil { + *exitOnInitialEventsEndBookmark = true + } + } default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) } @@ -549,6 +773,11 @@ loop: rvu.UpdateResourceVersion(resourceVersion) } eventCount++ + if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark { + watchDuration := clock.Since(start) + klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration) + return nil + } } } @@ -597,6 +826,18 @@ func (r *Reflector) relistResourceVersion() string { return r.lastSyncResourceVersion } +// rewatchResourceVersion determines the resource version the reflector should start streaming from. +func (r *Reflector) rewatchResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + if r.isLastSyncResourceVersionUnavailable { + // initial stream should return data at the most recent resource version. + // the returned data must be consistent i.e. as if served from etcd via a quorum read + return "" + } + return r.lastSyncResourceVersion +} + // setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned // "expired" or "too large resource version" error. func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) { @@ -635,5 +876,25 @@ func isTooLargeResourceVersionError(err error) bool { return true } } + + // Matches the message returned by api server before 1.17.0 + if strings.Contains(apierr.Status().Message, "Too large resource version") { + return true + } + + return false +} + +// isWatchErrorRetriable determines if it is safe to retry +// a watch error retrieved from the server. +func isWatchErrorRetriable(err error) bool { + // If this is "connection refused" error, it means that most likely apiserver is not responsive. + // It doesn't make sense to re-list all objects because most likely we will be able to restart + // watch where we ended. 
+ // If that's the case begin exponentially backing off and resend watch request. + // Do the same for "429" errors. + if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { + return true + } return false } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 4979642c..a889fdbc 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache/synctrack" "k8s.io/utils/buffer" "k8s.io/utils/clock" @@ -132,11 +133,13 @@ import ( // state, except that its ResourceVersion is replaced with a // ResourceVersion in which the object is actually absent. type SharedInformer interface { - // AddEventHandler adds an event handler to the shared informer using the shared informer's resync - // period. Events to a single handler are delivered sequentially, but there is no coordination - // between different handlers. - // It returns a registration handle for the handler that can be used to remove - // the handler again. + // AddEventHandler adds an event handler to the shared informer using + // the shared informer's resync period. Events to a single handler are + // delivered sequentially, but there is no coordination between + // different handlers. + // It returns a registration handle for the handler that can be used to + // remove the handler again, or to tell if the handler is synced (has + // seen every item in the initial list). AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) // AddEventHandlerWithResyncPeriod adds an event handler to the // shared informer with the requested resync period; zero means @@ -169,6 +172,10 @@ type SharedInformer interface { // HasSynced returns true if the shared informer's store has been // informed by at least one full LIST of the authoritative state // of the informer's object collection. This is unrelated to "resync". + // + // Note that this doesn't tell you if an individual handler is synced!! + // For that, please call HasSynced on the handle returned by + // AddEventHandler. HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not @@ -210,7 +217,14 @@ type SharedInformer interface { // Opaque interface representing the registration of ResourceEventHandler for // a SharedInformer. Must be supplied back to the same SharedInformer's // `RemoveEventHandler` to unregister the handlers. -type ResourceEventHandlerRegistration interface{} +// +// Also used to tell if the handler is synced (has had all items in the initial +// list delivered). +type ResourceEventHandlerRegistration interface { + // HasSynced reports if both the parent has synced and all pre-sync + // events have been delivered. + HasSynced() bool +} // SharedIndexInformer provides add and get Indexers ability based on SharedInformer. type SharedIndexInformer interface { @@ -220,14 +234,26 @@ type SharedIndexInformer interface { GetIndexer() Indexer } -// NewSharedInformer creates a new instance for the listwatcher. +// NewSharedInformer creates a new instance for the ListerWatcher. See NewSharedIndexInformerWithOptions for full details. 
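// Illustrative usage sketch, not part of the vendored patch: the options-based
// shared informer constructor added in this file, plus the per-handler HasSynced
// now exposed on the registration handle. The clientset is assumed.

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func runPodInformer(clientset kubernetes.Interface, stopCh <-chan struct{}) error {
	lw := cache.NewListWatchFromClient(
		clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	informer := cache.NewSharedIndexInformerWithOptions(lw, &v1.Pod{}, cache.SharedIndexInformerOptions{
		ResyncPeriod:      10 * time.Minute,
		Indexers:          cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		ObjectDescription: "pods",
	})
	handle, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* react to adds */ },
	})
	if err != nil {
		return err
	}
	go informer.Run(stopCh)
	// handle.HasSynced only reports true once this handler has seen every item
	// from the initial list, which is stricter than informer.HasSynced.
	cache.WaitForCacheSync(stopCh, handle.HasSynced)
	return nil
}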
func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer { return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{}) } -// NewSharedIndexInformer creates a new instance for the listwatcher. -// The created informer will not do resyncs if the given -// defaultEventHandlerResyncPeriod is zero. Otherwise: for each +// NewSharedIndexInformer creates a new instance for the ListerWatcher and specified Indexers. See +// NewSharedIndexInformerWithOptions for full details. +func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { + return NewSharedIndexInformerWithOptions( + lw, + exampleObject, + SharedIndexInformerOptions{ + ResyncPeriod: defaultEventHandlerResyncPeriod, + Indexers: indexers, + }, + ) +} + +// NewSharedIndexInformerWithOptions creates a new instance for the ListerWatcher. +// The created informer will not do resyncs if options.ResyncPeriod is zero. Otherwise: for each // handler that with a non-zero requested resync period, whether added // before or after the informer starts, the nominal resync period is // the requested resync period rounded up to a multiple of the @@ -235,21 +261,36 @@ func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEv // checking period is established when the informer starts running, // and is the maximum of (a) the minimum of the resync periods // requested before the informer starts and the -// defaultEventHandlerResyncPeriod given here and (b) the constant +// options.ResyncPeriod given here and (b) the constant // `minimumResyncPeriod` defined in this file. -func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { +func NewSharedIndexInformerWithOptions(lw ListerWatcher, exampleObject runtime.Object, options SharedIndexInformerOptions) SharedIndexInformer { realClock := &clock.RealClock{} - sharedIndexInformer := &sharedIndexInformer{ + + return &sharedIndexInformer{ + indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers), processor: &sharedProcessor{clock: realClock}, - indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), listerWatcher: lw, objectType: exampleObject, - resyncCheckPeriod: defaultEventHandlerResyncPeriod, - defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, - cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), + objectDescription: options.ObjectDescription, + resyncCheckPeriod: options.ResyncPeriod, + defaultEventHandlerResyncPeriod: options.ResyncPeriod, clock: realClock, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), } - return sharedIndexInformer +} + +// SharedIndexInformerOptions configures a sharedIndexInformer. +type SharedIndexInformerOptions struct { + // ResyncPeriod is the default event handler resync period and resync check + // period. If unset/unspecified, these are defaulted to 0 (do not resync). + ResyncPeriod time.Duration + + // Indexers is the sharedIndexInformer's indexers. If unset/unspecified, no indexers are configured. + Indexers Indexers + + // ObjectDescription is the sharedIndexInformer's object description. This is passed through to the + // underlying Reflector's type description. 
+ ObjectDescription string } // InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. @@ -323,12 +364,13 @@ type sharedIndexInformer struct { listerWatcher ListerWatcher - // objectType is an example object of the type this informer is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // objectType is an example object of the type this informer is expected to handle. If set, an event + // with an object with a mismatching type is dropped instead of being delivered to listeners. objectType runtime.Object + // objectDescription is the description of this informer's objects. This typically defaults to + objectDescription string + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call // shouldResync to check if any of our listeners need a resync. resyncCheckPeriod time.Duration @@ -378,7 +420,8 @@ type updateNotification struct { } type addNotification struct { - newObj interface{} + newObj interface{} + isInInitialList bool } type deleteNotification struct { @@ -423,12 +466,13 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { }) cfg := &Config{ - Queue: fifo, - ListerWatcher: s.listerWatcher, - ObjectType: s.objectType, - FullResyncPeriod: s.resyncCheckPeriod, - RetryOnError: false, - ShouldResync: s.processor.shouldResync, + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + ObjectDescription: s.objectDescription, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, Process: s.HandleDeltas, WatchErrorHandler: s.watchErrorHandler, @@ -557,7 +601,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv } } - listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize) + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize, s.HasSynced) if !s.started { return s.processor.addListener(listener), nil @@ -573,27 +617,35 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv handle := s.processor.addListener(listener) for _, item := range s.indexer.List() { - listener.add(addNotification{newObj: item}) + // Note that we enqueue these notifications with the lock held + // and before returning the handle. That means there is never a + // chance for anyone to call the handle's HasSynced method in a + // state when it would falsely return true (i.e., when the + // shared informer is synced but it has not observed an Add + // with isInitialList being true, nor when the thread + // processing notifications somehow goes faster than this + // thread adding them and the counter is temporarily zero). 
+ listener.add(addNotification{newObj: item, isInInitialList: true}) } return handle, nil } -func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { +func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool) error { s.blockDeltas.Lock() defer s.blockDeltas.Unlock() if deltas, ok := obj.(Deltas); ok { - return processDeltas(s, s.indexer, deltas) + return processDeltas(s, s.indexer, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") } // Conforms to ResourceEventHandler -func (s *sharedIndexInformer) OnAdd(obj interface{}) { +func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) { // Invocation of this function is locked under s.blockDeltas, so it is // save to distribute the notification s.cacheMutationDetector.AddObject(obj) - s.processor.distribute(addNotification{newObj: obj}, false) + s.processor.distribute(addNotification{newObj: obj, isInInitialList: isInInitialList}, false) } // Conforms to ResourceEventHandler @@ -815,6 +867,8 @@ type processorListener struct { handler ResourceEventHandler + syncTracker *synctrack.SingleFileTracker + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications // added until we OOM. @@ -845,11 +899,18 @@ type processorListener struct { resyncLock sync.Mutex } -func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener { +// HasSynced returns true if the source informer has synced, and all +// corresponding events have been delivered. +func (p *processorListener) HasSynced() bool { + return p.syncTracker.HasSynced() +} + +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int, hasSynced func() bool) *processorListener { ret := &processorListener{ nextCh: make(chan interface{}), addCh: make(chan interface{}), handler: handler, + syncTracker: &synctrack.SingleFileTracker{UpstreamHasSynced: hasSynced}, pendingNotifications: *buffer.NewRingGrowing(bufferSize), requestedResyncPeriod: requestedResyncPeriod, resyncPeriod: resyncPeriod, @@ -861,6 +922,9 @@ func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, res } func (p *processorListener) add(notification interface{}) { + if a, ok := notification.(addNotification); ok && a.isInInitialList { + p.syncTracker.Start() + } p.addCh <- notification } @@ -906,7 +970,10 @@ func (p *processorListener) run() { case updateNotification: p.handler.OnUpdate(notification.oldObj, notification.newObj) case addNotification: - p.handler.OnAdd(notification.newObj) + p.handler.OnAdd(notification.newObj, notification.isInInitialList) + if notification.isInInitialList { + p.syncTracker.Finished() + } case deleteNotification: p.handler.OnDelete(notification.oldObj) default: diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go new file mode 100644 index 00000000..ce51da9a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package synctrack + +import ( + "sync" + "sync/atomic" +) + +// Lazy defers the computation of `Evaluate` to when it is necessary. It is +// possible that Evaluate will be called in parallel from multiple goroutines. +type Lazy[T any] struct { + Evaluate func() (T, error) + + cache atomic.Pointer[cacheEntry[T]] +} + +type cacheEntry[T any] struct { + eval func() (T, error) + lock sync.RWMutex + result *T +} + +func (e *cacheEntry[T]) get() (T, error) { + if cur := func() *T { + e.lock.RLock() + defer e.lock.RUnlock() + return e.result + }(); cur != nil { + return *cur, nil + } + + e.lock.Lock() + defer e.lock.Unlock() + if e.result != nil { + return *e.result, nil + } + r, err := e.eval() + if err == nil { + e.result = &r + } + return r, err +} + +func (z *Lazy[T]) newCacheEntry() *cacheEntry[T] { + return &cacheEntry[T]{eval: z.Evaluate} +} + +// Notify should be called when something has changed necessitating a new call +// to Evaluate. +func (z *Lazy[T]) Notify() { z.cache.Swap(z.newCacheEntry()) } + +// Get should be called to get the current result of a call to Evaluate. If the +// current cached value is stale (due to a call to Notify), then Evaluate will +// be called synchronously. If subsequent calls to Get happen (without another +// Notify), they will all wait for the same return value. +// +// Error returns are not cached and will cause multiple calls to evaluate! +func (z *Lazy[T]) Get() (T, error) { + e := z.cache.Load() + if e == nil { + // Since we don't force a constructor, nil is a possible value. + // If multiple Gets race to set this, the swap makes sure only + // one wins. + z.cache.CompareAndSwap(nil, z.newCacheEntry()) + e = z.cache.Load() + } + return e.get() +} diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go new file mode 100644 index 00000000..3fa2beb6 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go @@ -0,0 +1,120 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package synctrack contains utilities for helping controllers track whether +// they are "synced" or not, that is, whether they have processed all items +// from the informer's initial list. +package synctrack + +import ( + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// AsyncTracker helps propagate HasSynced in the face of multiple worker threads. 
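// Illustrative usage sketch, not part of the vendored patch: wiring AsyncTracker
// into an informer whose keys are handed off to worker goroutines, so HasSynced
// stays false until the whole initial list has been worked off. The informer
// parameter and the availability of ResourceEventHandlerDetailedFuncs in this
// client-go version are assumptions; the queue wiring is elided.

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/cache/synctrack"
)

func newInitialListTracker(informer cache.SharedInformer) (*synctrack.AsyncTracker[string], error) {
	tracker := &synctrack.AsyncTracker[string]{UpstreamHasSynced: informer.HasSynced}
	_, err := informer.AddEventHandler(cache.ResourceEventHandlerDetailedFuncs{
		AddFunc: func(obj interface{}, isInInitialList bool) {
			key, keyErr := cache.MetaNamespaceKeyFunc(obj)
			if keyErr != nil {
				return
			}
			if isInInitialList {
				tracker.Start(key) // must run before the informer reports synced
			}
			// enqueue key here; the worker calls tracker.Finished(key) when done
		},
	})
	return tracker, err
}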
+type AsyncTracker[T comparable] struct { + UpstreamHasSynced func() bool + + lock sync.Mutex + waiting sets.Set[T] +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *AsyncTracker[T]) Start(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting == nil { + t.waiting = sets.New[T](key) + } else { + t.waiting.Insert(key) + } +} + +// Finished should be called when finished processing a key which was part of +// the initial list. Since keys are tracked individually, nothing bad happens +// if you call Finished without a corresponding call to Start. This makes it +// easier to use this in combination with e.g. queues which don't make it easy +// to plumb through the isInInitialList boolean. +func (t *AsyncTracker[T]) Finished(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting != nil { + t.waiting.Delete(key) + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *AsyncTracker[T]) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we can't hold our lock while + // waiting on that or a user is likely to get a deadlock. + if !t.UpstreamHasSynced() { + return false + } + t.lock.Lock() + defer t.lock.Unlock() + return t.waiting.Len() == 0 +} + +// SingleFileTracker helps propagate HasSynced when events are processed in +// order (i.e. via a queue). +type SingleFileTracker struct { + // Important: count is used with atomic operations so it must be 64-bit + // aligned, otherwise atomic operations will panic. Having it at the top of + // the struct will guarantee that, even on 32-bit arches. + // See https://pkg.go.dev/sync/atomic#pkg-note-BUG for more information. + count int64 + + UpstreamHasSynced func() bool +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *SingleFileTracker) Start() { + atomic.AddInt64(&t.count, 1) +} + +// Finished should be called when finished processing a key which was part of +// the initial list. You must never call Finished() before (or without) its +// corresponding Start(), that is a logic error that could cause HasSynced to +// return a wrong value. To help you notice this should it happen, Finished() +// will panic if the internal counter goes negative. +func (t *SingleFileTracker) Finished() { + result := atomic.AddInt64(&t.count, -1) + if result < 0 { + panic("synctrack: negative counter; this logic error means HasSynced may return incorrect value") + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *SingleFileTracker) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we don't want to then act on a + // stale count value. 
+ if !t.UpstreamHasSynced() { + return false + } + return atomic.LoadInt64(&t.count) <= 0 +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index c64ba9b2..940e7161 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -64,9 +64,8 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" rl "k8s.io/client-go/tools/leaderelection/resourcelock" - "k8s.io/utils/clock" - "k8s.io/klog/v2" + "k8s.io/utils/clock" ) const ( @@ -199,9 +198,7 @@ type LeaderElector struct { // stopped holding the leader lease func (le *LeaderElector) Run(ctx context.Context) { defer runtime.HandleCrash() - defer func() { - le.config.Callbacks.OnStoppedLeading() - }() + defer le.config.Callbacks.OnStoppedLeading() if !le.acquire(ctx) { return // ctx signalled done @@ -263,6 +260,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { // renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done. func (le *LeaderElector) renew(ctx context.Context) { + defer le.config.Lock.RecordEvent("stopped leading") ctx, cancel := context.WithCancel(ctx) defer cancel() wait.Until(func() { @@ -278,7 +276,6 @@ func (le *LeaderElector) renew(ctx context.Context) { klog.V(5).Infof("successfully renewed lease %v", desc) return } - le.config.Lock.RecordEvent("stopped leading") le.metrics.leaderOff(le.config.Name) klog.Infof("failed to renew lease %v: %v", desc, err) cancel() @@ -295,7 +292,7 @@ func (le *LeaderElector) release() bool { if !le.IsLeader() { return true } - now := metav1.Now() + now := metav1.NewTime(le.clock.Now()) leaderElectionRecord := rl.LeaderElectionRecord{ LeaderTransitions: le.observedRecord.LeaderTransitions, LeaseDurationSeconds: 1, @@ -315,7 +312,7 @@ func (le *LeaderElector) release() bool { // else it tries to renew the lease if it has already been acquired. Returns true // on success else returns false. 
func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { - now := metav1.Now() + now := metav1.NewTime(le.clock.Now()) leaderElectionRecord := rl.LeaderElectionRecord{ HolderIdentity: le.config.Lock.Identity(), LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second), @@ -347,7 +344,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { le.observedRawRecord = oldLeaderElectionRawRecord } if len(oldLeaderElectionRecord.HolderIdentity) > 0 && - le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && + le.observedTime.Add(time.Second*time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).After(now.Time) && !le.IsLeader() { klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) return false diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index c6e23bda..05b5b202 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -68,7 +68,7 @@ const ( // name: '*' // namespace: kube-system EndpointsLeasesResourceLock = "endpointsleases" - // When using EndpointsLeasesResourceLock, you need to ensure that + // When using ConfigMapsLeasesResourceLock, you need to ensure that // API Priority & Fairness is configured with non-default flow-schema // that will catch the necessary operations on leader-election related // configmap objects. diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 185ef0e5..8a9d7d60 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -117,10 +117,10 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec r.LeaderTransitions = int(*spec.LeaseTransitions) } if spec.AcquireTime != nil { - r.AcquireTime = metav1.Time{spec.AcquireTime.Time} + r.AcquireTime = metav1.Time{Time: spec.AcquireTime.Time} } if spec.RenewTime != nil { - r.RenewTime = metav1.Time{spec.RenewTime.Time} + r.RenewTime = metav1.Time{Time: spec.RenewTime.Time} } return &r @@ -132,8 +132,8 @@ func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.L return coordinationv1.LeaseSpec{ HolderIdentity: &ler.HolderIdentity, LeaseDurationSeconds: &leaseDurationSeconds, - AcquireTime: &metav1.MicroTime{ler.AcquireTime.Time}, - RenewTime: &metav1.MicroTime{ler.RenewTime.Time}, + AcquireTime: &metav1.MicroTime{Time: ler.AcquireTime.Time}, + RenewTime: &metav1.MicroTime{Time: ler.RenewTime.Time}, LeaseTransitions: &leaseTransitions, } } diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go index 6c684c7f..f36430dc 100644 --- a/vendor/k8s.io/client-go/tools/metrics/metrics.go +++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go @@ -58,6 +58,12 @@ type CallsMetric interface { Increment(exitCode int, callStatus string) } +// RetryMetric counts the number of retries sent to the server +// partitioned by code, method, and host. 
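// Illustrative usage sketch, not part of the vendored patch: a minimal
// RetryMetric backed by a Prometheus counter, registered through RegisterOpts.
// The metric name is made up for the example.

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/client-go/tools/metrics"
)

type retryCounter struct {
	vec *prometheus.CounterVec
}

func (c retryCounter) IncrementRetry(_ context.Context, code, method, host string) {
	c.vec.WithLabelValues(code, method, host).Inc()
}

func registerRetryMetric(reg prometheus.Registerer) {
	vec := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "rest_client_request_retries_total"},
		[]string{"code", "method", "host"},
	)
	reg.MustRegister(vec)
	metrics.Register(metrics.RegisterOpts{RequestRetry: retryCounter{vec: vec}})
}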
+type RetryMetric interface { + IncrementRetry(ctx context.Context, code string, method string, host string) +} + var ( // ClientCertExpiry is the expiry time of a client certificate ClientCertExpiry ExpiryMetric = noopExpiry{} @@ -76,6 +82,9 @@ var ( // ExecPluginCalls is the number of calls made to an exec plugin, partitioned by // exit code and call status. ExecPluginCalls CallsMetric = noopCalls{} + // RequestRetry is the retry metric that tracks the number of + // retries sent to the server. + RequestRetry RetryMetric = noopRetry{} ) // RegisterOpts contains all the metrics to register. Metrics may be nil. @@ -88,6 +97,7 @@ type RegisterOpts struct { RateLimiterLatency LatencyMetric RequestResult ResultMetric ExecPluginCalls CallsMetric + RequestRetry RetryMetric } // Register registers metrics for the rest client to use. This can @@ -118,6 +128,9 @@ func Register(opts RegisterOpts) { if opts.ExecPluginCalls != nil { ExecPluginCalls = opts.ExecPluginCalls } + if opts.RequestRetry != nil { + RequestRetry = opts.RequestRetry + } }) } @@ -144,3 +157,7 @@ func (noopResult) Increment(context.Context, string, string, string) {} type noopCalls struct{} func (noopCalls) Increment(int, string) {} + +type noopRetry struct{} + +func (noopRetry) IncrementRetry(context.Context, string, string, string) {} diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 92660597..1582e8ee 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -17,6 +17,7 @@ limitations under the License. package record import ( + "context" "fmt" "math/rand" "time" @@ -132,7 +133,9 @@ type EventBroadcaster interface { // with the event source set to the given event source. NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder - // Shutdown shuts down the broadcaster + // Shutdown shuts down the broadcaster. Once the broadcaster is shut + // down, it will only try to record an event in a sink once before + // giving up on it with an error message. Shutdown() } @@ -157,31 +160,34 @@ func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, re // Creates a new event broadcaster. 
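// Illustrative usage sketch, not part of the vendored patch: the usual event
// broadcaster lifecycle. With this change, Shutdown also cancels the internal
// context, so a pending recordToSink retry gives up promptly instead of sleeping
// through its remaining backoff. The clientset is assumed.

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

func newEventRecorder(clientset kubernetes.Interface) (record.EventRecorder, func()) {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: clientset.CoreV1().Events(""),
	})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
	return recorder, broadcaster.Shutdown // call the returned func when done recording
}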
func NewBroadcaster() EventBroadcaster { - return &eventBroadcasterImpl{ - Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), - sleepDuration: defaultSleepDuration, - } + return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) } func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return &eventBroadcasterImpl{ - Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), - sleepDuration: sleepDuration, - } + return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration) } func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster { - return &eventBroadcasterImpl{ - Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), - sleepDuration: defaultSleepDuration, - options: options, + eventBroadcaster := newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) + eventBroadcaster.options = options + return eventBroadcaster +} + +func newEventBroadcaster(broadcaster *watch.Broadcaster, sleepDuration time.Duration) *eventBroadcasterImpl { + eventBroadcaster := &eventBroadcasterImpl{ + Broadcaster: broadcaster, + sleepDuration: sleepDuration, } + eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(context.Background()) + return eventBroadcaster } type eventBroadcasterImpl struct { *watch.Broadcaster - sleepDuration time.Duration - options CorrelatorOptions + sleepDuration time.Duration + options CorrelatorOptions + cancelationCtx context.Context + cancel func() } // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. @@ -191,15 +197,16 @@ func (e *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interf eventCorrelator := NewEventCorrelatorWithOptions(e.options) return e.StartEventWatcher( func(event *v1.Event) { - recordToSink(sink, event, eventCorrelator, e.sleepDuration) + e.recordToSink(sink, event, eventCorrelator) }) } func (e *eventBroadcasterImpl) Shutdown() { e.Broadcaster.Shutdown() + e.cancel() } -func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, sleepDuration time.Duration) { +func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator) { // Make a copy before modification, because there could be multiple listeners. // Events are safe to copy like this. eventCopy := *event @@ -221,12 +228,18 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) break } + // Randomize the first sleep so that various clients won't all be // synced up if the master goes down. 
+ delay := e.sleepDuration if tries == 1 { - time.Sleep(time.Duration(float64(sleepDuration) * rand.Float64())) - } else { - time.Sleep(sleepDuration) + delay = time.Duration(float64(delay) * rand.Float64()) + } + select { + case <-e.cancelationCtx.Done(): + klog.Errorf("Unable to write event '%#v' (broadcaster is shut down)", event) + return + case <-time.After(delay): } } } diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go index 0b3f344a..fda4ad8f 100644 --- a/vendor/k8s.io/client-go/tools/record/fake.go +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -41,20 +41,31 @@ func objectString(object runtime.Object, includeObject bool) string { ) } -func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { - if f.Events != nil { - f.Events <- fmt.Sprintf("%s %s %s%s", eventtype, reason, message, objectString(object, f.IncludeObject)) +func annotationsString(annotations map[string]string) string { + if len(annotations) == 0 { + return "" + } else { + return " " + fmt.Sprint(annotations) } } -func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { +func (f *FakeRecorder) writeEvent(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { if f.Events != nil { - f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + objectString(object, f.IncludeObject) + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + + objectString(object, f.IncludeObject) + annotationsString(annotations) } } +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + f.writeEvent(object, nil, eventtype, reason, "%s", message) +} + +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + f.writeEvent(object, nil, eventtype, reason, messageFmt, args...) +} + func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - f.Eventf(object, eventtype, reason, messageFmt, args...) + f.writeEvent(object, annotations, eventtype, reason, messageFmt, args...) 
} // NewFakeRecorder creates new fake event recorder with event channel with diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 4ef02f09..91e17127 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -213,7 +213,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a if err := os.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil { return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err) } - if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil { + if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0600); err != nil { return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err) } } diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 26eacc2b..c1df7203 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -33,38 +33,81 @@ type DelayingInterface interface { AddAfter(item interface{}, duration time.Duration) } +// DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +type DelayingQueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // Queue optionally allows injecting custom queue Interface instead of the default one. + Queue Interface +} + // NewDelayingQueue constructs a new workqueue with delayed queuing ability. // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use -// NewNamedDelayingQueue instead. +// NewDelayingQueueWithConfig instead and specify a name. func NewDelayingQueue() DelayingInterface { - return NewDelayingQueueWithCustomClock(clock.RealClock{}, "") + return NewDelayingQueueWithConfig(DelayingQueueConfig{}) +} + +// NewDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.Queue == nil { + config.Queue = NewWithConfig(QueueConfig{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + + return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider) } // NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to // inject custom queue Interface instead of the default one +// Deprecated: Use NewDelayingQueueWithConfig instead. func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface { - return newDelayingQueue(clock.RealClock{}, q, name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Queue: q, + }) } -// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability +// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability. +// Deprecated: Use NewDelayingQueueWithConfig instead. 
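// Illustrative usage sketch, not part of the vendored patch: replacing the
// deprecated named/custom-clock constructors with the single config struct. The
// fake clock shows the injection point intended for tests.

import (
	"time"

	"k8s.io/client-go/util/workqueue"
	testingclock "k8s.io/utils/clock/testing"
)

func newDelayingQueues() (workqueue.DelayingInterface, workqueue.DelayingInterface) {
	// Named queue: the name turns on workqueue metrics.
	q := workqueue.NewDelayingQueueWithConfig(workqueue.DelayingQueueConfig{Name: "example"})

	// Test queue: inject a fake clock instead of using NewDelayingQueueWithCustomClock.
	testQ := workqueue.NewDelayingQueueWithConfig(workqueue.DelayingQueueConfig{
		Name:  "example-test",
		Clock: testingclock.NewFakeClock(time.Now()),
	})
	return q, testQ
}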
func NewNamedDelayingQueue(name string) DelayingInterface { - return NewDelayingQueueWithCustomClock(clock.RealClock{}, name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{Name: name}) } // NewDelayingQueueWithCustomClock constructs a new named workqueue -// with ability to inject real or fake clock for testing purposes +// with ability to inject real or fake clock for testing purposes. +// Deprecated: Use NewDelayingQueueWithConfig instead. func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface { - return newDelayingQueue(clock, NewNamed(name), name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Clock: clock, + }) } -func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType { +func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType { ret := &delayingType{ Interface: q, clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), waitingForAddCh: make(chan *waitFor, 1000), - metrics: newRetryMetrics(name), + metrics: newRetryMetrics(name, provider), } go ret.waitingLoop() diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index 4b0a6961..f012ccc5 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -244,13 +244,18 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu } } -func newRetryMetrics(name string) retryMetrics { +func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { var ret *defaultRetryMetrics if len(name) == 0 { return ret } + + if provider == nil { + provider = globalMetricsFactory.metricsProvider + } + return &defaultRetryMetrics{ - retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name), + retries: provider.NewRetriesMetric(name), } } diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index 6f706326..380c0645 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -33,17 +33,60 @@ type Interface interface { ShuttingDown() bool } +// QueueConfig specifies optional configurations to customize an Interface. +type QueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock ability to inject real or fake clock for testing purposes. + Clock clock.WithTicker +} + // New constructs a new work queue (see the package comment). func New() *Type { - return NewNamed("") + return NewWithConfig(QueueConfig{ + Name: "", + }) } +// NewWithConfig constructs a new workqueue with ability to +// customize different properties. +func NewWithConfig(config QueueConfig) *Type { + return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod) +} + +// NewNamed creates a new named queue. +// Deprecated: Use NewWithConfig instead. 
func NewNamed(name string) *Type { - rc := clock.RealClock{} + return NewWithConfig(QueueConfig{ + Name: name, + }) +} + +// newQueueWithConfig constructs a new named workqueue +// with the ability to customize different properties for testing purposes +func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { + var metricsFactory *queueMetricsFactory + if config.MetricsProvider != nil { + metricsFactory = &queueMetricsFactory{ + metricsProvider: config.MetricsProvider, + } + } else { + metricsFactory = &globalMetricsFactory + } + + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + return newQueue( - rc, - globalMetricsFactory.newQueueMetrics(name, rc), - defaultUnfinishedWorkUpdatePeriod, + config.Clock, + metricsFactory.newQueueMetrics(config.Name, config.Clock), + updatePeriod, ) } diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go index 91cd33f1..3e4016fb 100644 --- a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -16,6 +16,8 @@ limitations under the License. package workqueue +import "k8s.io/utils/clock" + // RateLimitingInterface is an interface that rate limits items being added to the queue. type RateLimitingInterface interface { DelayingInterface @@ -32,29 +34,68 @@ type RateLimitingInterface interface { NumRequeues(item interface{}) int } +// RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. + +type RateLimitingQueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. + DelayingQueue DelayingInterface +} + // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. // NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use -// NewNamedRateLimitingQueue instead. +// NewRateLimitingQueueWithConfig instead and specify a name. func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) +} + +// NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. +func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.DelayingQueue == nil { + config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + return &rateLimitingType{ - DelayingInterface: NewDelayingQueue(), + DelayingInterface: config.DelayingQueue, rateLimiter: rateLimiter, } } +// NewNamedRateLimitingQueue constructs a new named workqueue with rateLimited queuing ability. 
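// Illustrative usage sketch, not part of the vendored patch: a rate-limited
// queue built through the new config struct, using per-item exponential backoff.

import (
	"time"

	"k8s.io/client-go/util/workqueue"
)

func newWorkQueue() workqueue.RateLimitingInterface {
	return workqueue.NewRateLimitingQueueWithConfig(
		workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
		workqueue.RateLimitingQueueConfig{Name: "example"},
	)
}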
+// Deprecated: Use NewRateLimitingQueueWithConfig instead. func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: NewNamedDelayingQueue(name), - rateLimiter: rateLimiter, - } + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + Name: name, + }) } +// NewRateLimitingQueueWithDelayingInterface constructs a new named workqueue with rateLimited queuing ability +// with the option to inject a custom delaying queue instead of the default one. +// Deprecated: Use NewRateLimitingQueueWithConfig instead. func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter RateLimiter) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: di, - rateLimiter: rateLimiter, - } + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + DelayingQueue: di, + }) } // rateLimitingType wraps an Interface and provides rateLimited re-enquing diff --git a/vendor/k8s.io/component-base/config/types.go b/vendor/k8s.io/component-base/config/types.go index aad605ee..e1b9469d 100644 --- a/vendor/k8s.io/component-base/config/types.go +++ b/vendor/k8s.io/component-base/config/types.go @@ -74,7 +74,7 @@ type LeaderElectionConfiguration struct { type DebuggingConfiguration struct { // enableProfiling enables profiling via web interface host:port/debug/pprof/ EnableProfiling bool - // enableContentionProfiling enables lock contention profiling, if + // enableContentionProfiling enables block profiling, if // enableProfiling is true. EnableContentionProfiling bool } diff --git a/vendor/k8s.io/component-base/config/v1alpha1/types.go b/vendor/k8s.io/component-base/config/v1alpha1/types.go index c9d05525..3c5f004f 100644 --- a/vendor/k8s.io/component-base/config/v1alpha1/types.go +++ b/vendor/k8s.io/component-base/config/v1alpha1/types.go @@ -60,7 +60,7 @@ type LeaderElectionConfiguration struct { type DebuggingConfiguration struct { // enableProfiling enables profiling via web interface host:port/debug/pprof/ EnableProfiling *bool `json:"enableProfiling,omitempty"` - // enableContentionProfiling enables lock contention profiling, if + // enableContentionProfiling enables block profiling, if // enableProfiling is true. EnableContentionProfiling *bool `json:"enableContentionProfiling,omitempty"` } diff --git a/vendor/k8s.io/klog/v2/format.go b/vendor/k8s.io/klog/v2/format.go new file mode 100644 index 00000000..63995ca6 --- /dev/null +++ b/vendor/k8s.io/klog/v2/format.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-logr/logr" +) + +// Format wraps a value of an arbitrary type and implement fmt.Stringer and +// logr.Marshaler for them. Stringer returns pretty-printed JSON. MarshalLog +// returns the original value with a type that has no special methods, in +// particular no MarshalLog or MarshalJSON. 
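For context on the reworded DebuggingConfiguration comment: as far as I can tell, enableContentionProfiling ultimately turns on Go's block profile rather than a separate lock-contention profiler, which is what the new wording reflects. A standalone sketch of what block profiling amounts to (port and mux are illustrative):

package main

import (
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* handlers, including /debug/pprof/block
	"runtime"
)

func main() {
	// Block profiling: sample every blocking event (rate 1) so goroutine
	// blocking shows up under /debug/pprof/block.
	runtime.SetBlockProfileRate(1)
	_ = http.ListenAndServe("localhost:6060", nil)
}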
+// +// Wrapping values like that is useful when the value has a broken +// implementation of these special functions (for example, a type which +// inherits String from TypeMeta, but then doesn't re-implement String) or the +// implementation produces output that is less readable or unstructured (for +// example, the generated String functions for Kubernetes API types). +func Format(obj interface{}) interface{} { + return formatAny{Object: obj} +} + +type formatAny struct { + Object interface{} +} + +func (f formatAny) String() string { + var buffer strings.Builder + encoder := json.NewEncoder(&buffer) + encoder.SetIndent("", " ") + if err := encoder.Encode(&f.Object); err != nil { + return fmt.Sprintf("error marshaling %T to JSON: %v", f, err) + } + return buffer.String() +} + +func (f formatAny) MarshalLog() interface{} { + // Returning a pointer to a pointer ensures that zapr doesn't find a + // fmt.Stringer or logr.Marshaler when it checks the type of the + // value. It then falls back to reflection, which dumps the value being + // pointed to (JSON doesn't have pointers). + ptr := &f.Object + return &ptr +} + +var _ fmt.Stringer = formatAny{} +var _ logr.Marshaler = formatAny{} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index 1dc81a15..bcdf5f8e 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -18,6 +18,7 @@ package serialize import ( "bytes" + "encoding/json" "fmt" "strconv" @@ -196,11 +197,11 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { case textWriter: writeTextWriterValue(b, v) case fmt.Stringer: - writeStringValue(b, true, StringerToString(v)) + writeStringValue(b, StringerToString(v)) case string: - writeStringValue(b, true, v) + writeStringValue(b, v) case error: - writeStringValue(b, true, ErrorToString(v)) + writeStringValue(b, ErrorToString(v)) case logr.Marshaler: value := MarshalerToValue(v) // A marshaler that returns a string is useful for @@ -215,9 +216,9 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { // value directly. switch value := value.(type) { case string: - writeStringValue(b, true, value) + writeStringValue(b, value) default: - writeStringValue(b, false, f.AnyToString(value)) + f.formatAny(b, value) } case []byte: // In https://github.com/kubernetes/klog/pull/237 it was decided @@ -234,7 +235,7 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { b.WriteByte('=') b.WriteString(fmt.Sprintf("%+q", v)) default: - writeStringValue(b, false, f.AnyToString(v)) + f.formatAny(b, v) } } @@ -242,12 +243,25 @@ func KVFormat(b *bytes.Buffer, k, v interface{}) { Formatter{}.KVFormat(b, k, v) } -// AnyToString is the historic fallback formatter. -func (f Formatter) AnyToString(v interface{}) string { +// formatAny is the fallback formatter for a value. It supports a hook (for +// example, for YAML encoding) and itself uses JSON encoding. +func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { + b.WriteRune('=') if f.AnyToStringHook != nil { - return f.AnyToStringHook(v) + b.WriteString(f.AnyToStringHook(v)) + return + } + encoder := json.NewEncoder(b) + l := b.Len() + if err := encoder.Encode(v); err != nil { + // This shouldn't happen. We discard whatever the encoder + // wrote and instead dump an error string. + b.Truncate(l) + b.WriteString(fmt.Sprintf(`""`, err)) + return } - return fmt.Sprintf("%+v", v) + // Remove trailing newline. 
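A short sketch of how the new klog.Format wrapper is meant to be used from calling code; the deployConfig type and its values are illustrative, and the exact output depends on the configured logger:

package main

import "k8s.io/klog/v2"

type deployConfig struct {
	Replicas int
	Image    string
}

func main() {
	cfg := deployConfig{Replicas: 3, Image: "registry.example/app:latest"}

	// Plain value: handled by the fallback formatter (now JSON-encoded in text output).
	klog.InfoS("applying config", "config", cfg)

	// Wrapped value: Format forces pretty-printed JSON and sidesteps any broken
	// String or MarshalLog method the value might have.
	klog.InfoS("applying config", "config", klog.Format(cfg))

	klog.Flush()
}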
+ b.Truncate(b.Len() - 1) } // StringerToString converts a Stringer to a string, @@ -287,7 +301,7 @@ func ErrorToString(err error) (ret string) { } func writeTextWriterValue(b *bytes.Buffer, v textWriter) { - b.WriteRune('=') + b.WriteByte('=') defer func() { if err := recover(); err != nil { fmt.Fprintf(b, `""`, err) @@ -296,18 +310,13 @@ func writeTextWriterValue(b *bytes.Buffer, v textWriter) { v.WriteText(b) } -func writeStringValue(b *bytes.Buffer, quote bool, v string) { +func writeStringValue(b *bytes.Buffer, v string) { data := []byte(v) index := bytes.IndexByte(data, '\n') if index == -1 { b.WriteByte('=') - if quote { - // Simple string, quote quotation marks and non-printable characters. - b.WriteString(strconv.Quote(v)) - return - } - // Non-string with no line breaks. - b.WriteString(v) + // Simple string, quote quotation marks and non-printable characters. + b.WriteString(strconv.Quote(v)) return } diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go index ecd3f8b6..786af74b 100644 --- a/vendor/k8s.io/klog/v2/k8s_references.go +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -178,14 +178,14 @@ func (ks kobjSlice) process() (objs []interface{}, err string) { return objectRefs, "" } -var nilToken = []byte("") +var nilToken = []byte("null") func (ks kobjSlice) WriteText(out *bytes.Buffer) { s := reflect.ValueOf(ks.arg) switch s.Kind() { case reflect.Invalid: - // nil parameter, print as empty slice. - out.WriteString("[]") + // nil parameter, print as null. + out.Write(nilToken) return case reflect.Slice: // Okay, handle below. @@ -197,15 +197,15 @@ func (ks kobjSlice) WriteText(out *bytes.Buffer) { defer out.Write([]byte{']'}) for i := 0; i < s.Len(); i++ { if i > 0 { - out.Write([]byte{' '}) + out.Write([]byte{','}) } item := s.Index(i).Interface() if item == nil { out.Write(nilToken) } else if v, ok := item.(KMetadata); ok { - KObj(v).writeUnquoted(out) + KObj(v).WriteText(out) } else { - fmt.Fprintf(out, "", item) + fmt.Fprintf(out, `""`, item) return } } diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 466eeaf2..152f8a6b 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -1228,6 +1228,19 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the klog logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, ok := severity.ByName(name) + if !ok { + panic(fmt.Sprintf("klog.NewStandardLogger(%q): unknown severity", name)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. type logBridge severity.Severity diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go index 16e34853..3972cd5f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go +++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go @@ -37,8 +37,11 @@ limitations under the License. // # Atomicity // // Most of the operations are not atomic/thread-safe, except for -// [Replaceable.Replace] which can be performed while the objects -// are being read. +// [Replaceable.Replace] which can be performed while the objects are +// being read. 
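The NewStandardLogger addition above can be exercised as follows; the severity name must be one of the four documented values, everything else is illustrative:

package main

import "k8s.io/klog/v2"

func main() {
	// Bridge the standard library's log API into klog at WARNING severity.
	// Valid names are "INFO", "WARNING", "ERROR" and "FATAL"; anything else panics.
	warn := klog.NewStandardLogger("WARNING")
	warn.Println("written with the std log API, emitted through klog")
	klog.Flush()
}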
Specifically, `Get` methods are NOT thread-safe. Never +// call `Get()` without a lock on a multi-threaded environment, since +// it's usually performing updates to caches that will require write +// operations. // // # Etags // @@ -97,6 +100,13 @@ func (r Result[T]) Get() Result[T] { type Data[T any] interface { // Returns the cached data, as well as an "etag" to identify the // version of the cache, or an error if something happened. + // + // # Important note + // + // This method is NEVER thread-safe, never assume it is OK to + // call `Get()` without holding a proper mutex in a + // multi-threaded environment, especially since `Get()` will + // usually update the cache and perform write operations. Get() Result[T] } @@ -249,6 +259,13 @@ type Replaceable[T any] struct { // previously had returned a success, that success will be returned // instead. If the cache fails but we never returned a success, that // failure is returned. +// +// # Important note +// +// As all implementations of Get, this implementation is NOT +// thread-safe. Please properly lock a mutex before calling this method +// if you are in a multi-threaded environment, since this method will +// update the cache and perform write operations. func (c *Replaceable[T]) Get() Result[T] { result := (*c.cache.Load()).Get() if result.Err != nil && c.result != nil && c.result.Err == nil { diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index a0b07a6d..187eb5d8 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -65,6 +65,11 @@ func durationToMilliseconds(timeDuration time.Duration) int64 { } type traceItem interface { + // rLock must be called before invoking time or writeItem. + rLock() + // rUnlock must be called after processing the item is complete. + rUnlock() + // time returns when the trace was recorded as completed. time() time.Time // writeItem outputs the traceItem to the buffer. If stepThreshold is non-nil, only output the @@ -79,6 +84,10 @@ type traceStep struct { fields []Field } +// rLock doesn't need to do anything because traceStep instances are immutable. +func (s traceStep) rLock() {} +func (s traceStep) rUnlock() {} + func (s traceStep) time() time.Time { return s.stepTime } @@ -106,6 +115,14 @@ type Trace struct { traceItems []traceItem } +func (t *Trace) rLock() { + t.lock.RLock() +} + +func (t *Trace) rUnlock() { + t.lock.RUnlock() +} + func (t *Trace) time() time.Time { if t.endTime != nil { return *t.endTime @@ -231,8 +248,10 @@ func (t *Trace) logTrace() { func (t *Trace) writeTraceSteps(b *bytes.Buffer, formatter string, stepThreshold *time.Duration) { lastStepTime := t.startTime for _, stepOrTrace := range t.traceItems { + stepOrTrace.rLock() stepOrTrace.writeItem(b, formatter, lastStepTime, stepThreshold) lastStepTime = stepOrTrace.time() + stepOrTrace.rUnlock() } } diff --git a/vendor/knative.dev/pkg/apis/OWNERS b/vendor/knative.dev/pkg/apis/OWNERS index 82f0814c..13014203 100644 --- a/vendor/knative.dev/pkg/apis/OWNERS +++ b/vendor/knative.dev/pkg/apis/OWNERS @@ -1,9 +1,15 @@ # The OWNERS file is used by prow to automatically merge approved PRs. 
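The rLock/rUnlock plumbing above guards writing out a trace while nested traces are still being appended to; caller-side usage is unchanged. A small sketch against the vendored k8s.io/utils/trace API (names, fields and thresholds are illustrative):

package main

import (
	"time"

	utiltrace "k8s.io/utils/trace"
)

func main() {
	t := utiltrace.New("list-pods", utiltrace.Field{Key: "namespace", Value: "default"})
	// Only log the trace if the whole operation exceeded the threshold.
	defer t.LogIfLong(100 * time.Millisecond)

	time.Sleep(60 * time.Millisecond)
	t.Step("listed from cache")

	time.Sleep(60 * time.Millisecond)
	t.Step("filtered results")
}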
approvers: -- mattmoor -- vaikas -- n3wscott +- technical-oversight-committee +- serving-wg-leads +- eventing-wg-leads reviewers: -- whaught +- serving-writers +- eventing-writers +- eventing-reviewers +- serving-reviewers + +options: + no_parent_owners: true diff --git a/vendor/knative.dev/pkg/apis/duck/OWNERS b/vendor/knative.dev/pkg/apis/duck/OWNERS index f6267f14..af1eb05d 100644 --- a/vendor/knative.dev/pkg/apis/duck/OWNERS +++ b/vendor/knative.dev/pkg/apis/duck/OWNERS @@ -1,8 +1,8 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- mattmoor -- vaikas +- eventing-wg-leads reviewers: -- whaught +- eventing-reviewers +- eventing-writers diff --git a/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go b/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go index 83e746a0..bdcb994f 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go @@ -38,7 +38,16 @@ import ( // typically stored in the object's `status`, as this information may // be generated by the controller. type Addressable struct { + // Name is the name of the address. + // +optional + Name *string `json:"name,omitempty"` + URL *apis.URL `json:"url,omitempty"` + + // CACerts is the Certification Authority (CA) certificates in PEM format + // according to https://www.rfc-editor.org/rfc/rfc7468. + // +optional + CACerts *string `json:"CACerts,omitempty"` } var ( @@ -62,7 +71,15 @@ type AddressableType struct { // AddressStatus shows how we expect folks to embed Addressable in // their Status field. type AddressStatus struct { + // Address is a single Addressable address. + // If Addresses is present, Address will be ignored by clients. + // +optional Address *Addressable `json:"address,omitempty"` + + // Addresses is a list of addresses for different protocols (HTTP and HTTPS) + // If Addresses is present, Address must be ignored by clients. + // +optional + Addresses []Addressable `json:"addresses,omitempty"` } // Verify AddressableType resources meet duck contracts. @@ -89,9 +106,11 @@ func (a *Addressable) ConvertFrom(ctx context.Context, from apis.Convertible) er // Populate implements duck.Populatable func (t *AddressableType) Populate() { + name := "http" t.Status = AddressStatus{ - &Addressable{ + Address: &Addressable{ // Populate ALL fields + Name: &name, URL: &apis.URL{ Scheme: "http", Host: "foo.com", diff --git a/vendor/knative.dev/pkg/apis/duck/v1/destination.go b/vendor/knative.dev/pkg/apis/duck/v1/destination.go index 87f0983f..15638f40 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/destination.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/destination.go @@ -31,6 +31,13 @@ type Destination struct { // URI can be an absolute URL(non-empty scheme and non-empty host) pointing to the target or a relative URI. Relative URIs will be resolved using the base URI retrieved from Ref. // +optional URI *apis.URL `json:"uri,omitempty"` + + // CACerts are Certification Authority (CA) certificates in PEM format + // according to https://www.rfc-editor.org/rfc/rfc7468. + // If set, these CAs are appended to the set of CAs provided + // by the Addressable target, if any. 
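The new Addressable.Name/CACerts fields and the AddressStatus.Addresses list can be populated like this; the hostnames and the PEM content are placeholders:

package main

import (
	"knative.dev/pkg/apis"
	duckv1 "knative.dev/pkg/apis/duck/v1"
)

func main() {
	httpsName := "https"
	caPEM := "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"

	status := duckv1.AddressStatus{
		// Kept for older clients; anything that understands Addresses must prefer it.
		Address: &duckv1.Addressable{
			URL: &apis.URL{Scheme: "http", Host: "svc.example.com"},
		},
		Addresses: []duckv1.Addressable{{
			Name:    &httpsName,
			URL:     &apis.URL{Scheme: "https", Host: "svc.example.com"},
			CACerts: &caPEM,
		}},
	}
	_ = status
}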
+ // +optional + CACerts *string `json:"CACerts,omitempty"` } // Validate the Destination has all the necessary fields and check the @@ -73,6 +80,10 @@ func (d *Destination) GetRef() *KReference { } func (d *Destination) SetDefaults(ctx context.Context) { + if d == nil { + return + } + if d.Ref != nil && d.Ref.Namespace == "" { d.Ref.Namespace = apis.ParentMeta(ctx).Namespace } diff --git a/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go b/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go index a0b169d6..c723c147 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go @@ -49,6 +49,10 @@ type KReference struct { // Note: This API is EXPERIMENTAL and might break anytime. For more details: https://github.com/knative/eventing/issues/5086 // +optional Group string `json:"group,omitempty"` + + // Address points to a specific Address Name. + // +optional + Address *string `json:"address,omitempty"` } func (kr *KReference) Validate(ctx context.Context) *apis.FieldError { diff --git a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go index 96638e57..83cccfd3 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go @@ -34,6 +34,13 @@ func (in *AddressStatus) DeepCopyInto(out *AddressStatus) { *out = new(Addressable) (*in).DeepCopyInto(*out) } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]Addressable, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -50,11 +57,21 @@ func (in *AddressStatus) DeepCopy() *AddressStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Addressable) DeepCopyInto(out *Addressable) { *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } if in.URL != nil { in, out := &in.URL, &out.URL *out = new(apis.URL) (*in).DeepCopyInto(*out) } + if in.CACerts != nil { + in, out := &in.CACerts, &out.CACerts + *out = new(string) + **out = **in + } return } @@ -332,13 +349,18 @@ func (in *Destination) DeepCopyInto(out *Destination) { if in.Ref != nil { in, out := &in.Ref, &out.Ref *out = new(KReference) - **out = **in + (*in).DeepCopyInto(*out) } if in.URI != nil { in, out := &in.URI, &out.URI *out = new(apis.URL) (*in).DeepCopyInto(*out) } + if in.CACerts != nil { + in, out := &in.CACerts, &out.CACerts + *out = new(string) + **out = **in + } return } @@ -355,6 +377,11 @@ func (in *Destination) DeepCopy() *Destination { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KReference) DeepCopyInto(out *KReference) { *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } return } diff --git a/vendor/knative.dev/pkg/configmap/OWNERS b/vendor/knative.dev/pkg/configmap/OWNERS deleted file mode 100644 index c3ff67e2..00000000 --- a/vendor/knative.dev/pkg/configmap/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. 
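And the sender side: a Destination carrying its own CA bundle alongside a KReference (all values illustrative):

package main

import duckv1 "knative.dev/pkg/apis/duck/v1"

func main() {
	caPEM := "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"

	dest := duckv1.Destination{
		Ref: &duckv1.KReference{
			APIVersion: "v1",
			Kind:       "Service",
			Name:       "event-sink",
			// Namespace is filled in by SetDefaults from the parent resource,
			// which now also tolerates being called on a nil *Destination.
		},
		// Appended to whatever CAs the resolved Addressable advertises.
		CACerts: &caPEM,
	}
	_ = dest
}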
- -approvers: -- pkg-configmap-writers -reviewers: -- pkg-configmap-reviewers diff --git a/vendor/knative.dev/pkg/kmeta/OWNERS b/vendor/knative.dev/pkg/kmeta/OWNERS deleted file mode 100644 index a1e0f822..00000000 --- a/vendor/knative.dev/pkg/kmeta/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- mattmoor -- vagababov diff --git a/vendor/knative.dev/pkg/logging/OWNERS b/vendor/knative.dev/pkg/logging/OWNERS deleted file mode 100644 index f51b91be..00000000 --- a/vendor/knative.dev/pkg/logging/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- mdemirhan -- n3wscott -- yanweiguo diff --git a/vendor/knative.dev/pkg/metrics/OWNERS b/vendor/knative.dev/pkg/metrics/OWNERS deleted file mode 100644 index 7cd699f2..00000000 --- a/vendor/knative.dev/pkg/metrics/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- mdemirhan -- yanweiguo - -reviewers: -- mdemirhan -- yanweiguo -- skonto diff --git a/vendor/knative.dev/pkg/metrics/config.go b/vendor/knative.dev/pkg/metrics/config.go index 3fe3c173..ce5e7f87 100644 --- a/vendor/knative.dev/pkg/metrics/config.go +++ b/vendor/knative.dev/pkg/metrics/config.go @@ -41,19 +41,21 @@ const ( DomainEnv = "METRICS_DOMAIN" // The following keys are used to configure metrics reporting. - // See https://github.com/knative/serving/blob/main/config/config-observability.yaml + // See https://github.com/knative/serving/blob/main/config/core/configmaps/observability.yaml // for details. collectorAddressKey = "metrics.opencensus-address" collectorSecureKey = "metrics.opencensus-require-tls" reportingPeriodKey = "metrics.reporting-period-seconds" - defaultBackendEnvName = "DEFAULT_METRICS_BACKEND" - defaultPrometheusPort = 9090 - maxPrometheusPort = 65535 - minPrometheusPort = 1024 - defaultPrometheusHost = "0.0.0.0" - prometheusPortEnvName = "METRICS_PROMETHEUS_PORT" - prometheusHostEnvName = "METRICS_PROMETHEUS_HOST" + defaultBackendEnvName = "DEFAULT_METRICS_BACKEND" + defaultPrometheusPort = 9090 + defaultPrometheusReportingPeriod = 5 + defaultOpenCensusReportingPeriod = 60 + maxPrometheusPort = 65535 + minPrometheusPort = 1024 + defaultPrometheusHost = "0.0.0.0" + prometheusPortEnvName = "METRICS_PROMETHEUS_PORT" + prometheusHostEnvName = "METRICS_PROMETHEUS_HOST" ) var ( @@ -206,9 +208,9 @@ func createMetricsConfig(_ context.Context, ops ExporterOptions) (*metricsConfig } else { switch mc.backendDestination { case openCensus: - mc.reportingPeriod = time.Minute + mc.reportingPeriod = defaultOpenCensusReportingPeriod * time.Second case prometheus: - mc.reportingPeriod = 5 * time.Second + mc.reportingPeriod = defaultPrometheusReportingPeriod * time.Second } } return &mc, nil diff --git a/vendor/knative.dev/pkg/metrics/config_observability.go b/vendor/knative.dev/pkg/metrics/config_observability.go index b6affd29..e766b071 100644 --- a/vendor/knative.dev/pkg/metrics/config_observability.go +++ b/vendor/knative.dev/pkg/metrics/config_observability.go @@ -71,6 +71,10 @@ type ObservabilityConfig struct { // OpenCensus. "None" disables all backends. RequestMetricsBackend string + // RequestMetricsReportingPeriodSeconds specifies the request metrics reporting period in sec at queue proxy, eg 1. + // If a zero or negative value is passed the default reporting period is used (10 secs). 
+ RequestMetricsReportingPeriodSeconds int + // EnableProfiling indicates whether it is allowed to retrieve runtime profiling data from // the pods via an HTTP server in the format expected by the pprof visualization tool. EnableProfiling bool @@ -114,6 +118,12 @@ func NewObservabilityConfigFromConfigMap(configMap *corev1.ConfigMap) (*Observab return oc, nil } + defaultRequestMetricsReportingPeriod, err := getDefaultRequestMetricsReportingPeriod(configMap.Data) + if err != nil { + return nil, err + } + oc.RequestMetricsReportingPeriodSeconds = defaultRequestMetricsReportingPeriod + if err := cm.Parse(configMap.Data, cm.AsBool("logging.enable-var-log-collection", &oc.EnableVarLogCollection), cm.AsString("logging.revision-url-template", &oc.LoggingURLTemplate), @@ -121,6 +131,7 @@ func NewObservabilityConfigFromConfigMap(configMap *corev1.ConfigMap) (*Observab cm.AsBool(EnableReqLogKey, &oc.EnableRequestLog), cm.AsBool(EnableProbeReqLogKey, &oc.EnableProbeRequestLog), cm.AsString("metrics.request-metrics-backend-destination", &oc.RequestMetricsBackend), + cm.AsInt("metrics.request-metrics-reporting-period-seconds", &oc.RequestMetricsReportingPeriodSeconds), cm.AsBool("profiling.enable", &oc.EnableProfiling), cm.AsString("metrics.opencensus-address", &oc.MetricsCollectorAddress), ); err != nil { @@ -163,3 +174,27 @@ func ConfigMapName() string { } return "config-observability" } + +// Use the same as `metrics.reporting-period-seconds` for the default +// of `metrics.request-metrics-reporting-period-seconds` +func getDefaultRequestMetricsReportingPeriod(data map[string]string) (int, error) { + // Default backend is prometheus + period := defaultPrometheusReportingPeriod + if repStr := data[reportingPeriodKey]; repStr != "" { + repInt, err := strconv.Atoi(repStr) + if err != nil { + return -1, fmt.Errorf("invalid %s value %q", reportingPeriodKey, repStr) + } + period = repInt + } else { + if raw, ok := data["metrics.request-metrics-backend-destination"]; ok { + switch metricsBackend(raw) { + case prometheus: + period = defaultPrometheusReportingPeriod + case openCensus: + period = defaultOpenCensusReportingPeriod + } + } + } + return period, nil +} diff --git a/vendor/knative.dev/pkg/metrics/opencensus_exporter.go b/vendor/knative.dev/pkg/metrics/opencensus_exporter.go index eaeac4a5..59e33ab0 100644 --- a/vendor/knative.dev/pkg/metrics/opencensus_exporter.go +++ b/vendor/knative.dev/pkg/metrics/opencensus_exporter.go @@ -99,7 +99,7 @@ func getCredentials(component string, secret *corev1.Secret, logger *zap.Sugared return nil } return credentials.NewTLS(&tls.Config{ - MinVersion: tls.VersionTLS12, + MinVersion: tls.VersionTLS13, GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { cert, err := tls.X509KeyPair(secret.Data["client-cert.pem"], secret.Data["client-key.pem"]) if err != nil { diff --git a/vendor/knative.dev/pkg/metrics/prometheus_exporter.go b/vendor/knative.dev/pkg/metrics/prometheus_exporter.go index aec6c09a..5f11c12e 100644 --- a/vendor/knative.dev/pkg/metrics/prometheus_exporter.go +++ b/vendor/knative.dev/pkg/metrics/prometheus_exporter.go @@ -20,6 +20,7 @@ import ( "net/http" "strconv" "sync" + "time" prom "contrib.go.opencensus.io/exporter/prometheus" "go.opencensus.io/resource" @@ -82,10 +83,10 @@ func startNewPromSrv(e *prom.Exporter, host string, port int) *http.Server { if curPromSrv != nil { curPromSrv.Close() } - //nolint:gosec curPromSrv = &http.Server{ - Addr: host + ":" + strconv.Itoa(port), - Handler: sm, + Addr: host + ":" + 
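A sketch of how the new request-metrics reporting period is derived from the observability ConfigMap; the keys come from the code above, the values are illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"knative.dev/pkg/metrics"
)

func main() {
	cm := &corev1.ConfigMap{Data: map[string]string{
		"metrics.request-metrics-backend-destination": "prometheus",
		// "metrics.request-metrics-reporting-period-seconds" is omitted, so the
		// default falls back to "metrics.reporting-period-seconds" or, failing
		// that, to the backend default (5s for Prometheus, 60s for OpenCensus).
	}}

	oc, err := metrics.NewObservabilityConfigFromConfigMap(cm)
	if err != nil {
		panic(err)
	}
	fmt.Println(oc.RequestMetricsBackend, oc.RequestMetricsReportingPeriodSeconds)
}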
strconv.Itoa(port), + Handler: sm, + ReadHeaderTimeout: time.Minute, //https://medium.com/a-journey-with-go/go-understand-and-mitigate-slowloris-attack-711c1b1403f6 } return curPromSrv } diff --git a/vendor/modules.txt b/vendor/modules.txt index 5904a324..5a3eb96f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -41,7 +41,7 @@ github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/cloudflare/circl v1.3.3 +# github.com/cloudflare/circl v1.3.7 ## explicit; go 1.19 github.com/cloudflare/circl/dh/x25519 github.com/cloudflare/circl/dh/x448 @@ -124,7 +124,7 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-containerregistry v0.16.1 +# github.com/google/go-containerregistry v0.19.1 ## explicit; go 1.18 github.com/google/go-containerregistry/pkg/name # github.com/google/go-github/v53 v53.2.0 @@ -168,9 +168,6 @@ github.com/json-iterator/go github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter -# github.com/matttproud/golang_protobuf_extensions v1.0.4 -## explicit; go 1.9 -github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -180,7 +177,7 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg -# github.com/onsi/ginkgo/v2 v2.16.0 +# github.com/onsi/ginkgo/v2 v2.17.1 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -202,7 +199,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.31.1 +# github.com/onsi/gomega v1.32.0 ## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format @@ -220,21 +217,21 @@ github.com/opencontainers/go-digest # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.17.0 -## explicit; go 1.19 +# github.com/prometheus/client_golang v1.19.0 +## explicit; go 1.20 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.5.0 +# github.com/prometheus/client_model v0.6.0 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.44.0 -## explicit; go 1.18 +# github.com/prometheus/common v0.48.0 +## explicit; go 1.20 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.11.1 +# github.com/prometheus/procfs v0.12.0 ## explicit; go 1.19 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -244,8 +241,8 @@ github.com/prometheus/procfs/internal/util github.com/prometheus/statsd_exporter/pkg/level github.com/prometheus/statsd_exporter/pkg/mapper github.com/prometheus/statsd_exporter/pkg/mapper/fsm -# github.com/shipwright-io/build v0.12.0 -## explicit; go 1.20 +# github.com/shipwright-io/build v0.13.0 +## explicit; go 1.21 
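The ReadHeaderTimeout added above is plain net/http hardening; the same idea in isolation (address and handler are illustrative):

package main

import (
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.Handle("/metrics", http.NotFoundHandler()) // stand-in for the real exporter handler

	srv := &http.Server{
		Addr:    "0.0.0.0:9090",
		Handler: mux,
		// Bound how long a client may take to send its request headers; without
		// it, slow clients can pin connections open indefinitely (Slowloris).
		ReadHeaderTimeout: time.Minute,
	}
	_ = srv.ListenAndServe()
}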
github.com/shipwright-io/build/pkg/apis/build/v1alpha1 github.com/shipwright-io/build/pkg/apis/build/v1beta1 github.com/shipwright-io/build/pkg/ctxlog @@ -253,7 +250,7 @@ github.com/shipwright-io/build/pkg/webhook # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/tektoncd/pipeline v0.47.4 +# github.com/tektoncd/pipeline v0.50.5 ## explicit; go 1.19 github.com/tektoncd/pipeline/pkg/apis/config github.com/tektoncd/pipeline/pkg/apis/pipeline @@ -294,7 +291,7 @@ go.uber.org/atomic # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.26.0 +# go.uber.org/zap v1.27.0 ## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer @@ -305,7 +302,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.18.0 +# golang.org/x/crypto v0.22.0 ## explicit; go 1.18 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b @@ -315,7 +312,7 @@ golang.org/x/crypto/sha3 # golang.org/x/exp v0.0.0-20230307190834-24139beb5833 ## explicit; go 1.18 golang.org/x/exp/maps -# golang.org/x/net v0.20.0 +# golang.org/x/net v0.24.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -327,20 +324,20 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.11.0 +# golang.org/x/oauth2 v0.16.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.6.0 ## explicit; go 1.18 golang.org/x/sync/semaphore -# golang.org/x/sys v0.16.0 +# golang.org/x/sys v0.19.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.16.0 +# golang.org/x/term v0.19.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -445,14 +442,16 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.31.0 -## explicit; go 1.11 +# google.golang.org/protobuf v1.33.0 +## explicit; go 1.17 +google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand +google.golang.org/protobuf/internal/editiondefaults google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset @@ -476,6 +475,7 @@ google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb +google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/fieldmaskpb @@ -491,8 +491,8 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.26.9 -## explicit; go 1.19 +# k8s.io/api v0.27.11 +## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -515,6 +515,7 @@ k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 k8s.io/api/batch/v1beta1 k8s.io/api/certificates/v1 +k8s.io/api/certificates/v1alpha1 k8s.io/api/certificates/v1beta1 
k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 @@ -539,24 +540,26 @@ k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 -k8s.io/api/resource/v1alpha1 +k8s.io/api/resource/v1alpha2 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.26.9 -## explicit; go 1.19 +# k8s.io/apiextensions-apiserver v0.27.11 +## explicit; go 1.20 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 +k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1 +k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.26.9 -## explicit; go 1.19 +# k8s.io/apimachinery v0.27.11 +## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -589,6 +592,7 @@ k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/managedfields +k8s.io/apimachinery/pkg/util/managedfields/internal k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net @@ -605,11 +609,11 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.26.9 -## explicit; go 1.19 +# k8s.io/apiserver v0.27.11 +## explicit; go 1.20 k8s.io/apiserver/pkg/storage/names -# k8s.io/client-go v0.26.9 -## explicit; go 1.19 +# k8s.io/client-go v0.27.11 +## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -624,6 +628,7 @@ k8s.io/client-go/applyconfigurations/autoscaling/v2beta2 k8s.io/client-go/applyconfigurations/batch/v1 k8s.io/client-go/applyconfigurations/batch/v1beta1 k8s.io/client-go/applyconfigurations/certificates/v1 +k8s.io/client-go/applyconfigurations/certificates/v1alpha1 k8s.io/client-go/applyconfigurations/certificates/v1beta1 k8s.io/client-go/applyconfigurations/coordination/v1 k8s.io/client-go/applyconfigurations/coordination/v1beta1 @@ -650,7 +655,7 @@ k8s.io/client-go/applyconfigurations/policy/v1beta1 k8s.io/client-go/applyconfigurations/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 -k8s.io/client-go/applyconfigurations/resource/v1alpha1 +k8s.io/client-go/applyconfigurations/resource/v1alpha2 k8s.io/client-go/applyconfigurations/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 @@ -680,6 +685,7 @@ k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 k8s.io/client-go/kubernetes/typed/batch/v1 k8s.io/client-go/kubernetes/typed/batch/v1beta1 k8s.io/client-go/kubernetes/typed/certificates/v1 
+k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 @@ -704,7 +710,7 @@ k8s.io/client-go/kubernetes/typed/policy/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 -k8s.io/client-go/kubernetes/typed/resource/v1alpha1 +k8s.io/client-go/kubernetes/typed/resource/v1alpha2 k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 @@ -728,6 +734,7 @@ k8s.io/client-go/rest/watch k8s.io/client-go/restmapper k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache +k8s.io/client-go/tools/cache/synctrack k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest @@ -747,11 +754,11 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.26.9 -## explicit; go 1.19 +# k8s.io/component-base v0.27.11 +## explicit; go 1.20 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 -# k8s.io/klog/v2 v2.90.1 +# k8s.io/klog/v2 v2.100.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -759,8 +766,8 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a -## explicit; go 1.18 +# k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 +## explicit; go 1.19 k8s.io/kube-openapi/pkg/builder3/util k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common @@ -773,7 +780,7 @@ k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20230209194617-a36077c30491 +# k8s.io/utils v0.0.0-20230505201702-9f6742963106 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -784,7 +791,7 @@ k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/strings/slices k8s.io/utils/trace -# knative.dev/pkg v0.0.0-20230221145627-8efb3485adcf +# knative.dev/pkg v0.0.0-20231011201526-df28feae6d34 ## explicit; go 1.18 knative.dev/pkg/apis knative.dev/pkg/apis/duck @@ -801,8 +808,8 @@ knative.dev/pkg/metrics knative.dev/pkg/metrics/metricskey knative.dev/pkg/tracker knative.dev/pkg/webhook/resourcesemantics -# sigs.k8s.io/controller-runtime v0.14.6 -## explicit; go 1.19 +# sigs.k8s.io/controller-runtime v0.15.3 +## explicit; go 1.20 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder sigs.k8s.io/controller-runtime/pkg/cache @@ -828,8 +835,8 @@ sigs.k8s.io/controller-runtime/pkg/internal/field/selector sigs.k8s.io/controller-runtime/pkg/internal/flock sigs.k8s.io/controller-runtime/pkg/internal/httpserver sigs.k8s.io/controller-runtime/pkg/internal/log -sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder +sigs.k8s.io/controller-runtime/pkg/internal/source sigs.k8s.io/controller-runtime/pkg/internal/testing/addr sigs.k8s.io/controller-runtime/pkg/internal/testing/certs sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane @@ -844,10 +851,8 @@ sigs.k8s.io/controller-runtime/pkg/predicate sigs.k8s.io/controller-runtime/pkg/ratelimiter sigs.k8s.io/controller-runtime/pkg/reconcile 
sigs.k8s.io/controller-runtime/pkg/recorder -sigs.k8s.io/controller-runtime/pkg/runtime/inject sigs.k8s.io/controller-runtime/pkg/scheme sigs.k8s.io/controller-runtime/pkg/source -sigs.k8s.io/controller-runtime/pkg/source/internal sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/conversion @@ -859,6 +864,7 @@ sigs.k8s.io/json/internal/golang/encoding/json # sigs.k8s.io/structured-merge-diff/v4 v4.2.3 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath +sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value diff --git a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml index 209b7f4e..817c2c72 100644 --- a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml +++ b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml @@ -1,38 +1,44 @@ linters: disable-all: true enable: - - asciicheck - - bodyclose - - depguard - - dogsled - - errcheck - - errorlint - - exportloopref - - goconst - - gocritic - - gocyclo - - gofmt - - goimports - - goprintffuncname - - gosec - - gosimple - - govet - - importas - - ineffassign - - misspell - - nakedret - - nilerr - - nolintlint - - prealloc - - revive - - rowserrcheck - - staticcheck - - stylecheck - - typecheck - - unconvert - - unparam - - unused - - whitespace + - asasalint + - asciicheck + - bidichk + - bodyclose + - depguard + - dogsled + - dupl + - errcheck + - errchkjson + - errorlint + - exhaustive + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nilerr + - nolintlint + - prealloc + - revive + - staticcheck + - stylecheck + - tagliatelle + - typecheck + - unconvert + - unparam + - unused + - whitespace linters-settings: importas: @@ -53,13 +59,42 @@ linters-settings: - pkg: sigs.k8s.io/controller-runtime alias: ctrl staticcheck: - go: "1.19" + go: "1.20" stylecheck: - go: "1.19" + go: "1.20" depguard: include-go-root: true packages: - io/ioutil # https://go.dev/doc/go1.16#ioutil + revive: + rules: + # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: superfluous-else + - name: unreachable-code + - name: redefines-builtin-id + # + # Rules in addition to the recommended configuration above. + # + - name: bool-literal-in-expr + - name: constant-logical-expr issues: max-same-issues: 0 @@ -69,68 +104,71 @@ issues: exclude-use-default: false # List of regexps of issue texts to exclude, empty list by default. exclude: - # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time. - # If it is decided they will not be addressed they should be moved above this comment. 
- - Subprocess launch(ed with variable|ing should be audited) - - (G204|G104|G307) - - "ST1000: at least one file in a package should have a package comment" + # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time. + # If it is decided they will not be addressed they should be moved above this comment. + - Subprocess launch(ed with variable|ing should be audited) + - (G204|G104|G307) + - "ST1000: at least one file in a package should have a package comment" exclude-rules: - - linters: - - gosec - text: "G108: Profiling endpoint is automatically exposed on /debug/pprof" - - linters: - - revive - text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" - - linters: - - errcheck - text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked - # With Go 1.16, the new embed directive can be used with an un-named import, - # revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us. - # This directive allows the embed package to be imported with an underscore everywhere. - - linters: - - revive - source: _ "embed" - # Exclude some packages or code to require comments, for example test code, or fake clients. - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - source: (func|type).*Fake.* - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - path: fake_\.go - # Disable unparam "always receives" which might not be really - # useful when building libraries. - - linters: - - unparam - text: always receives - # Dot imports for gomega or ginkgo are allowed - # within test files. - - path: _test\.go - text: should not use dot imports - - path: _test\.go - text: cyclomatic complexity - - path: _test\.go - text: "G107: Potential HTTP request made with variable url" - # Append should be able to assign to a different var/slice. - - linters: - - gocritic - text: "appendAssign: append result not assigned to the same slice" - - linters: - - gocritic - text: "singleCaseSwitch: should rewrite switch statement to if statement" - # It considers all file access to a filename that comes from a variable problematic, - # which is naiv at best. - - linters: - - gosec - text: "G304: Potential file inclusion via variable" - - linters: - - revive - text: "package-comments: should have a package comment" + - linters: + - gosec + text: "G108: Profiling endpoint is automatically exposed on /debug/pprof" + - linters: + - revive + text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" + - linters: + - errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + - linters: + - staticcheck + text: "SA1019: .*The component config package has been deprecated and will be removed in a future release." + # With Go 1.16, the new embed directive can be used with an un-named import, + # revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us. + # This directive allows the embed package to be imported with an underscore everywhere. 
+ - linters: + - revive + source: _ "embed" + # Exclude some packages or code to require comments, for example test code, or fake clients. + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + source: (func|type).*Fake.* + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + path: fake_\.go + # Disable unparam "always receives" which might not be really + # useful when building libraries. + - linters: + - unparam + text: always receives + # Dot imports for gomega and ginkgo are allowed + # within test files. + - path: _test\.go + text: should not use dot imports + - path: _test\.go + text: cyclomatic complexity + - path: _test\.go + text: "G107: Potential HTTP request made with variable url" + # Append should be able to assign to a different var/slice. + - linters: + - gocritic + text: "appendAssign: append result not assigned to the same slice" + - linters: + - gocritic + text: "singleCaseSwitch: should rewrite switch statement to if statement" + # It considers all file access to a filename that comes from a variable problematic, + # which is naiv at best. + - linters: + - gosec + text: "G304: Potential file inclusion via variable" + - linters: + - dupl + path: _test\.go run: timeout: 10m skip-files: - - "zz_generated.*\\.go$" - - ".*conversion.*\\.go$" + - "zz_generated.*\\.go$" + - ".*conversion.*\\.go$" allow-parallel-runners: true diff --git a/vendor/sigs.k8s.io/controller-runtime/Makefile b/vendor/sigs.k8s.io/controller-runtime/Makefile index 36647c69..71ec644d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/Makefile +++ b/vendor/sigs.k8s.io/controller-runtime/Makefile @@ -75,7 +75,7 @@ $(CONTROLLER_GEN): $(TOOLS_DIR)/go.mod # Build controller-gen from tools folder. $(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golanci-lint using hack script into tools folder. hack/ensure-golangci-lint.sh \ -b $(TOOLS_BIN_DIR) \ - $(shell cat .github/workflows/golangci-lint.yml | grep version | sed 's/.*version: //') + $(shell cat .github/workflows/golangci-lint.yml | grep "version: v" | sed 's/.*version: //') ## -------------------------------------- ## Linting @@ -117,7 +117,15 @@ clean-bin: ## Remove all generated binaries. 
rm -rf hack/tools/bin .PHONY: verify-modules -verify-modules: modules - @if !(git diff --quiet HEAD -- go.sum go.mod); then \ +verify-modules: modules ## Verify go modules are up to date + @if !(git diff --quiet HEAD -- go.sum go.mod $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/go.sum $(ENVTEST_DIR)/go.mod $(ENVTEST_DIR)/go.sum); then \ + git diff; \ echo "go module files are out of date, please run 'make modules'"; exit 1; \ fi + +.PHONY: verify-generate +verify-generate: generate ## Verify generated files are up to date + @if !(git diff --quiet HEAD); then \ + git diff; \ + echo "generated files are out of date, run make generate"; exit 1; \ + fi diff --git a/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES index 71089478..7848941d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES +++ b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES @@ -9,23 +9,21 @@ aliases: # non-admin folks who have write-access and can approve any PRs in the repo controller-runtime-maintainers: - - vincepri + - alvaroaleman - joelanford + - sbueringer + - vincepri # non-admin folks who can approve any PRs in the repo controller-runtime-approvers: - - alvaroaleman - fillzpp - - sbueringer # folks who can review and LGTM any PRs in the repo (doesn't # include approvers & admins -- those count too via the OWNERS # file) controller-runtime-reviewers: - - vincepri - varshaprasad96 - - fillzpp - - sbueringer + - inteon # folks to can approve things in the directly-ported # testing_frameworks portions of the codebase diff --git a/vendor/sigs.k8s.io/controller-runtime/README.md b/vendor/sigs.k8s.io/controller-runtime/README.md index 484881dc..e785abdd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/README.md +++ b/vendor/sigs.k8s.io/controller-runtime/README.md @@ -16,8 +16,8 @@ Documentation: - [Basic controller using builder](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/builder#example-Builder) - [Creating a manager](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#example-New) - [Creating a controller](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#example-New) -- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/master/examples) -- [Designs](https://github.com/kubernetes-sigs/controller-runtime/blob/master/designs) +- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/main/examples) +- [Designs](https://github.com/kubernetes-sigs/controller-runtime/blob/main/designs) # Versioning, Maintenance, and Compatibility @@ -27,7 +27,7 @@ Users: - We follow [Semantic Versioning (semver)](https://semver.org) - Use releases with your dependency management to ensure that you get compatible code -- The master branch contains all the latest code, some of which may break compatibility (so "normal" `go get` is not recommended) +- The main branch contains all the latest code, some of which may break compatibility (so "normal" `go get` is not recommended) Contributors: diff --git a/vendor/sigs.k8s.io/controller-runtime/RELEASE.md b/vendor/sigs.k8s.io/controller-runtime/RELEASE.md index 134a73a3..f234494f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/RELEASE.md +++ b/vendor/sigs.k8s.io/controller-runtime/RELEASE.md @@ -10,7 +10,7 @@ to create a new branch you will just need to ensure that all big fixes are cherr ### Create the new branch and the release tag -1. Create a new branch `git checkout -b release-` from master +1. Create a new branch `git checkout -b release-` from main 2. 
Push the new branch to the remote repository ### Now, let's generate the changelog diff --git a/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS b/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS index 32e6a3b9..9c5241c6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS +++ b/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS @@ -10,5 +10,6 @@ # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ -pwittrock -droot +alvaroaleman +sbueringer +vincepri diff --git a/vendor/sigs.k8s.io/controller-runtime/alias.go b/vendor/sigs.k8s.io/controller-runtime/alias.go index 35cba30b..23796388 100644 --- a/vendor/sigs.k8s.io/controller-runtime/alias.go +++ b/vendor/sigs.k8s.io/controller-runtime/alias.go @@ -99,6 +99,8 @@ var ( // ConfigFile returns the cfg.File function for deferred config file loading, // this is passed into Options{}.From() to populate the Options fields for // the manager. + // + // Deprecated: This is deprecated in favor of using Options directly. ConfigFile = cfg.File // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager. @@ -139,7 +141,7 @@ var ( // The logger, when used with controllers, can be expected to contain basic information about the object // that's being reconciled like: // - `reconciler group` and `reconciler kind` coming from the For(...) object passed in when building a controller. - // - `name` and `namespace` injected from the reconciliation request. + // - `name` and `namespace` from the reconciliation request. // // This is meant to be used with the context supplied in a struct that satisfies the Reconciler interface. LoggerFrom = log.FromContext diff --git a/vendor/sigs.k8s.io/controller-runtime/doc.go b/vendor/sigs.k8s.io/controller-runtime/doc.go index fa6c532c..0319bc3f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/doc.go @@ -46,13 +46,13 @@ limitations under the License. // // Frequently asked questions about using controller-runtime and designing // controllers can be found at -// https://github.com/kubernetes-sigs/controller-runtime/blob/master/FAQ.md. +// https://github.com/kubernetes-sigs/controller-runtime/blob/main/FAQ.md. // // # Managers // // Every controller and webhook is ultimately run by a Manager (pkg/manager). A // manager is responsible for running controllers and webhooks, and setting up -// common dependencies (pkg/runtime/inject), like shared caches and clients, as +// common dependencies, like shared caches and clients, as // well as managing leader election (pkg/leaderelection). Managers are // generally configured to gracefully shut down controllers on pod termination // by wiring up a signal handler (pkg/manager/signals). 
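Since ConfigFile is now deprecated in favor of Options, a migration sketch (the scheme setup and option values are illustrative):

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)

	// Configure the manager through Options directly instead of loading a
	// component config file via the deprecated ConfigFile helper.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:         scheme,
		LeaderElection: false,
	})
	if err != nil {
		panic(err)
	}
	_ = mgr.Start(ctrl.SetupSignalHandler())
}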
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go index 03f9633a..570cfd63 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go @@ -30,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" + internalsource "sigs.k8s.io/controller-runtime/pkg/internal/source" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -96,14 +97,20 @@ func (blder *Builder) For(object client.Object, opts ...ForOption) *Builder { // OwnsInput represents the information set by Owns method. type OwnsInput struct { + matchEveryOwner bool object client.Object predicates []predicate.Predicate objectProjection objectProjection } // Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to -// create / delete / update events by *reconciling the owner object*. This is the equivalent of calling -// Watches(&source.Kind{Type: }, &handler.EnqueueRequestForOwner{OwnerType: apiType, IsController: true}). +// create / delete / update events by *reconciling the owner object*. +// +// The default behavior reconciles only the first controller-type OwnerReference of the given type. +// Use Owns(object, builder.MatchEveryOwner) to reconcile all owners. +// +// By default, this is the equivalent of calling +// Watches(object, handler.EnqueueRequestForOwner([...], ownerType, OnlyControllerOwner())). func (blder *Builder) Owns(object client.Object, opts ...OwnsOption) *Builder { input := OwnsInput{object: object} for _, opt := range opts { @@ -122,10 +129,54 @@ type WatchesInput struct { objectProjection objectProjection } -// Watches exposes the lower-level ControllerManagedBy Watches functions through the builder. Consider using -// Owns or For instead of Watches directly. +// Watches defines the type of Object to watch, and configures the ControllerManagedBy to respond to create / delete / +// update events by *reconciling the object* with the given EventHandler. +// +// This is the equivalent of calling +// WatchesRawSource(source.Kind(scheme, object), eventhandler, opts...). +func (blder *Builder) Watches(object client.Object, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder { + src := source.Kind(blder.mgr.GetCache(), object) + return blder.WatchesRawSource(src, eventhandler, opts...) +} + +// WatchesMetadata is the same as Watches, but forces the internal cache to only watch PartialObjectMetadata. +// +// This is useful when watching lots of objects, really big objects, or objects for which you only know +// the GVK, but not the structure. You'll need to pass metav1.PartialObjectMetadata to the client +// when fetching objects in your reconciler, otherwise you'll end up with a duplicate structured or unstructured cache. +// +// When watching a resource with metadata only, for example the v1.Pod, you should not Get and List using the v1.Pod type. +// Instead, you should use the special metav1.PartialObjectMetadata type. 
+// +// ❌ Incorrect: +// +// pod := &v1.Pod{} +// mgr.GetClient().Get(ctx, nsAndName, pod) +// +// ✅ Correct: +// +// pod := &metav1.PartialObjectMetadata{} +// pod.SetGroupVersionKind(schema.GroupVersionKind{ +// Group: "", +// Version: "v1", +// Kind: "Pod", +// }) +// mgr.GetClient().Get(ctx, nsAndName, pod) +// +// In the first case, controller-runtime will create another cache for the +// concrete type on top of the metadata cache; this increases memory +// consumption and leads to race conditions as caches are not in sync. +func (blder *Builder) WatchesMetadata(object client.Object, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder { + opts = append(opts, OnlyMetadata) + return blder.Watches(object, eventhandler, opts...) +} + +// WatchesRawSource exposes the lower-level ControllerManagedBy Watches functions through the builder. // Specified predicates are registered only for given source. -func (blder *Builder) Watches(src source.Source, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder { +// +// STOP! Consider using For(...), Owns(...), Watches(...), WatchesMetadata(...) instead. +// This method is only exposed for more advanced use cases, most users should use higher level functions. +func (blder *Builder) WatchesRawSource(src source.Source, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder { input := WatchesInput{src: src, eventhandler: eventhandler} for _, opt := range opts { opt.ApplyToWatches(&input) @@ -217,11 +268,11 @@ func (blder *Builder) project(obj client.Object, proj objectProjection) (client. func (blder *Builder) doWatch() error { // Reconcile type if blder.forInput.object != nil { - typeForSrc, err := blder.project(blder.forInput.object, blder.forInput.objectProjection) + obj, err := blder.project(blder.forInput.object, blder.forInput.objectProjection) if err != nil { return err } - src := &source.Kind{Type: typeForSrc} + src := source.Kind(blder.mgr.GetCache(), obj) hdler := &handler.EnqueueRequestForObject{} allPredicates := append(blder.globalPredicates, blder.forInput.predicates...) if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { @@ -234,15 +285,20 @@ func (blder *Builder) doWatch() error { return errors.New("Owns() can only be used together with For()") } for _, own := range blder.ownsInput { - typeForSrc, err := blder.project(own.object, own.objectProjection) + obj, err := blder.project(own.object, own.objectProjection) if err != nil { return err } - src := &source.Kind{Type: typeForSrc} - hdler := &handler.EnqueueRequestForOwner{ - OwnerType: blder.forInput.object, - IsController: true, + src := source.Kind(blder.mgr.GetCache(), obj) + opts := []handler.OwnerOption{} + if !own.matchEveryOwner { + opts = append(opts, handler.OnlyControllerOwner()) } + hdler := handler.EnqueueRequestForOwner( + blder.mgr.GetScheme(), blder.mgr.GetRESTMapper(), + blder.forInput.object, + opts..., + ) allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, own.predicates...) if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { @@ -258,8 +314,8 @@ func (blder *Builder) doWatch() error { allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, w.predicates...) - // If the source of this watch is of type *source.Kind, project it. - if srckind, ok := w.src.(*internalsource.Kind); ok { + // If the source of this watch is of type Kind, project it.
+ if srckind, ok := w.src.(*internalsource.Kind); ok { typeForSrc, err := blder.project(srckind.Type, w.objectProjection) if err != nil { return err @@ -314,8 +370,8 @@ func (blder *Builder) doController(r reconcile.Reconciler) error { } // Setup cache sync timeout. - if ctrlOptions.CacheSyncTimeout == 0 && globalOpts.CacheSyncTimeout != nil { - ctrlOptions.CacheSyncTimeout = *globalOpts.CacheSyncTimeout + if ctrlOptions.CacheSyncTimeout == 0 && globalOpts.CacheSyncTimeout > 0 { + ctrlOptions.CacheSyncTimeout = globalOpts.CacheSyncTimeout } controllerName, err := blder.getControllerName(gvk, hasGVK) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go index 3a66491b..bce2065e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go @@ -138,3 +138,19 @@ var ( ) // }}} + +// MatchEveryOwner determines whether the watch should be filtered based on +// controller ownership. As in, when the OwnerReference.Controller field is set. +// +// If passed as an option, +// the handler receives notification for every owner of the object with the given type. +// If unset (default), the handler receives notification only for the first +// OwnerReference with `Controller: true`. +var MatchEveryOwner = &matchEveryOwner{} + +type matchEveryOwner struct{} + +// ApplyToOwns applies this configuration to the given OwnsInput options. +func (o matchEveryOwner) ApplyToOwns(opts *OwnsInput) { + opts.matchEveryOwner = true +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go index 534e6d64..82fac4d8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go @@ -22,9 +22,12 @@ import ( "net/url" "strings" + "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -33,13 +36,14 @@ import ( // WebhookBuilder builds a Webhook. type WebhookBuilder struct { - apiType runtime.Object - withDefaulter admission.CustomDefaulter - withValidator admission.CustomValidator - gvk schema.GroupVersionKind - mgr manager.Manager - config *rest.Config - recoverPanic bool + apiType runtime.Object + withDefaulter admission.CustomDefaulter + withValidator admission.CustomValidator + gvk schema.GroupVersionKind + mgr manager.Manager + config *rest.Config + recoverPanic bool + logConstructor func(base logr.Logger, req *admission.Request) logr.Logger } // WebhookManagedBy allows inform its manager.Manager. @@ -69,6 +73,12 @@ func (blder *WebhookBuilder) WithValidator(validator admission.CustomValidator) return blder } +// WithLogConstructor overrides the webhook's LogConstructor. +func (blder *WebhookBuilder) WithLogConstructor(logConstructor func(base logr.Logger, req *admission.Request) logr.Logger) *WebhookBuilder { + blder.logConstructor = logConstructor + return blder +} + // RecoverPanic indicates whether the panic caused by webhook should be recovered. 
func (blder *WebhookBuilder) RecoverPanic() *WebhookBuilder { blder.recoverPanic = true @@ -80,6 +90,9 @@ func (blder *WebhookBuilder) Complete() error { // Set the Config blder.loadRestConfig() + // Configure the default LogConstructor + blder.setLogConstructor() + // Set the Webhook if needed return blder.registerWebhooks() } @@ -90,6 +103,26 @@ func (blder *WebhookBuilder) loadRestConfig() { } } +func (blder *WebhookBuilder) setLogConstructor() { + if blder.logConstructor == nil { + blder.logConstructor = func(base logr.Logger, req *admission.Request) logr.Logger { + log := base.WithValues( + "webhookGroup", blder.gvk.Group, + "webhookKind", blder.gvk.Kind, + ) + if req != nil { + return log.WithValues( + blder.gvk.Kind, klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + "resource", req.Resource, "user", req.UserInfo.Username, + "requestID", req.UID, + ) + } + return log + } + } +} + func (blder *WebhookBuilder) registerWebhooks() error { typ, err := blder.getType() if err != nil { @@ -116,6 +149,7 @@ func (blder *WebhookBuilder) registerWebhooks() error { func (blder *WebhookBuilder) registerDefaultingWebhook() { mwh := blder.getDefaultingWebhook() if mwh != nil { + mwh.LogConstructor = blder.logConstructor path := generateMutatePath(blder.gvk) // Checking if the path is already registered. @@ -131,10 +165,10 @@ func (blder *WebhookBuilder) registerDefaultingWebhook() { func (blder *WebhookBuilder) getDefaultingWebhook() *admission.Webhook { if defaulter := blder.withDefaulter; defaulter != nil { - return admission.WithCustomDefaulter(blder.apiType, defaulter).WithRecoverPanic(blder.recoverPanic) + return admission.WithCustomDefaulter(blder.mgr.GetScheme(), blder.apiType, defaulter).WithRecoverPanic(blder.recoverPanic) } if defaulter, ok := blder.apiType.(admission.Defaulter); ok { - return admission.DefaultingWebhookFor(defaulter).WithRecoverPanic(blder.recoverPanic) + return admission.DefaultingWebhookFor(blder.mgr.GetScheme(), defaulter).WithRecoverPanic(blder.recoverPanic) } log.Info( "skip registering a mutating webhook, object does not implement admission.Defaulter or WithDefaulter wasn't called", @@ -145,6 +179,7 @@ func (blder *WebhookBuilder) getDefaultingWebhook() *admission.Webhook { func (blder *WebhookBuilder) registerValidatingWebhook() { vwh := blder.getValidatingWebhook() if vwh != nil { + vwh.LogConstructor = blder.logConstructor path := generateValidatePath(blder.gvk) // Checking if the path is already registered. 
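A sketch of how the new WithLogConstructor hook above might be used from the webhook builder; setupWebhookWithLogging and the obj parameter are hypothetical placeholders for an API type that implements admission.Defaulter or admission.Validator:

package webhooks

import (
	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// setupWebhookWithLogging registers webhooks for obj and overrides the default
// log constructor so every admission log line carries the request UID.
func setupWebhookWithLogging(mgr ctrl.Manager, obj runtime.Object) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(obj).
		WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger {
			// req is nil when the base webhook logger is constructed; only enrich per-request.
			if req != nil {
				return base.WithValues("requestID", req.UID)
			}
			return base
		}).
		Complete()
}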
@@ -160,10 +195,10 @@ func (blder *WebhookBuilder) registerValidatingWebhook() { func (blder *WebhookBuilder) getValidatingWebhook() *admission.Webhook { if validator := blder.withValidator; validator != nil { - return admission.WithCustomValidator(blder.apiType, validator).WithRecoverPanic(blder.recoverPanic) + return admission.WithCustomValidator(blder.mgr.GetScheme(), blder.apiType, validator).WithRecoverPanic(blder.recoverPanic) } if validator, ok := blder.apiType.(admission.Validator); ok { - return admission.ValidatingWebhookFor(validator).WithRecoverPanic(blder.recoverPanic) + return admission.ValidatingWebhookFor(blder.mgr.GetScheme(), validator).WithRecoverPanic(blder.recoverPanic) } log.Info( "skip registering a validating webhook, object does not implement admission.Validator or WithValidator wasn't called", @@ -179,7 +214,7 @@ func (blder *WebhookBuilder) registerConversionWebhook() error { } if ok { if !blder.isAlreadyHandled("/convert") { - blder.mgr.GetWebhookServer().Register("/convert", &conversion.Webhook{}) + blder.mgr.GetWebhookServer().Register("/convert", conversion.NewWebhookHandler(blder.mgr.GetScheme())) } log.Info("Conversion webhook enabled", "GVK", blder.gvk) } @@ -195,10 +230,10 @@ func (blder *WebhookBuilder) getType() (runtime.Object, error) { } func (blder *WebhookBuilder) isAlreadyHandled(path string) bool { - if blder.mgr.GetWebhookServer().WebhookMux == nil { + if blder.mgr.GetWebhookServer().WebhookMux() == nil { return false } - h, p := blder.mgr.GetWebhookServer().WebhookMux.Handler(&http.Request{URL: &url.URL{Path: path}}) + h, p := blder.mgr.GetWebhookServer().WebhookMux().Handler(&http.Request{URL: &url.URL{Path: path}}) if p == path && h != nil { return true } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index bcb1141a..76003870 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -19,10 +19,11 @@ package cache import ( "context" "fmt" - "reflect" + "net/http" "time" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -37,7 +38,10 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) -var log = logf.RuntimeLog.WithName("object-cache") +var ( + log = logf.RuntimeLog.WithName("object-cache") + defaultSyncPeriod = 10 * time.Hour +) // Cache knows how to load Kubernetes objects, fetch informers to request // to receive events for Kubernetes objects (at a low-level), @@ -98,310 +102,157 @@ type Informer interface { HasSynced() bool } -// ObjectSelector is an alias name of internal.Selector. -type ObjectSelector internal.Selector - -// SelectorsByObject associate a client.Object's GVK to a field/label selector. -// There is also `DefaultSelector` to set a global default (which will be overridden by -// a more specific setting here, if any). -type SelectorsByObject map[client.Object]ObjectSelector - // Options are the optional arguments for creating a new InformersMap object. type Options struct { + // HTTPClient is the http client to use for the REST client + HTTPClient *http.Client + // Scheme is the scheme to use for mapping objects to GroupVersionKinds Scheme *runtime.Scheme // Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources Mapper meta.RESTMapper - // Resync is the base frequency the informers are resynced. 
- // Defaults to defaultResyncTime. - // A 10 percent jitter will be added to the Resync period between informers - // So that all informers will not send list requests simultaneously. - Resync *time.Duration - - // Namespace restricts the cache's ListWatch to the desired namespace + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // there will a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // + // This applies to all controllers. + // + // A period sync happens for two reasons: + // 1. To insure against a bug in the controller that causes an object to not + // be requeued, when it otherwise should be requeued. + // 2. To insure against an unknown bug in controller-runtime, or its dependencies, + // that causes an object to not be requeued, when it otherwise should be + // requeued, or to be removed from the queue, when it otherwise should not + // be removed. + // + // If you want + // 1. to insure against missed watch events, or + // 2. to poll services that cannot be watched, + // then we recommend that, instead of changing the default period, the + // controller requeue, with a constant duration `t`, whenever the controller + // is "done" with an object, and would otherwise not requeue it, i.e., we + // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, + // instead of `reconcile.Result{}`. + SyncPeriod *time.Duration + + // Namespaces restricts the cache's ListWatch to the desired namespaces // Default watches all namespaces - Namespace string + Namespaces []string - // SelectorsByObject restricts the cache's ListWatch to the desired - // fields per GVK at the specified object, the map's value must implement - // Selector [1] using for example a Set [2] - // [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector - // [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set - SelectorsByObject SelectorsByObject + // DefaultLabelSelector will be used as a label selectors for all object types + // unless they have a more specific selector set in ByObject. + DefaultLabelSelector labels.Selector - // DefaultSelector will be used as selectors for all object types - // that do not have a selector in SelectorsByObject defined. - DefaultSelector ObjectSelector + // DefaultFieldSelector will be used as a field selectors for all object types + // unless they have a more specific selector set in ByObject. + DefaultFieldSelector fields.Selector - // UnsafeDisableDeepCopyByObject indicates not to deep copy objects during get or - // list objects per GVK at the specified object. + // DefaultTransform will be used as transform for all object types + // unless they have a more specific transform set in ByObject. + DefaultTransform toolscache.TransformFunc + + // ByObject restricts the cache's ListWatch to the desired fields per GVK at the specified object. + ByObject map[client.Object]ByObject + + // UnsafeDisableDeepCopy indicates not to deep copy objects during get or + // list objects for EVERY object. // Be very careful with this, when enabled you must DeepCopy any object before mutating it, // otherwise you will mutate the object in the cache. 
- UnsafeDisableDeepCopyByObject DisableDeepCopyByObject + // + // This is a global setting for all objects, and can be overridden by the ByObject setting. + UnsafeDisableDeepCopy *bool +} - // TransformByObject is a map from GVKs to transformer functions which +// ByObject offers more fine-grained control over the cache's ListWatch by object. +type ByObject struct { + // Label represents a label selector for the object. + Label labels.Selector + + // Field represents a field selector for the object. + Field fields.Selector + + // Transform is a map from objects to transformer functions which // get applied when objects of the transformation are about to be committed // to cache. // // This function is called both for new objects to enter the cache, - // and for updated objects. - TransformByObject TransformByObject + // and for updated objects. + Transform toolscache.TransformFunc - // DefaultTransform is the transform used for all GVKs which do - // not have an explicit transform func set in TransformByObject - DefaultTransform toolscache.TransformFunc + // UnsafeDisableDeepCopy indicates not to deep copy objects during get or + // list objects per GVK at the specified object. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + UnsafeDisableDeepCopy *bool } -var defaultResyncTime = 10 * time.Hour +// NewCacheFunc - Function for creating a new cache from the options and a rest config. +type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) // New initializes and returns a new Cache. func New(config *rest.Config, opts Options) (Cache, error) { - opts, err := defaultOpts(config, opts) - if err != nil { - return nil, err + if len(opts.Namespaces) == 0 { + opts.Namespaces = []string{metav1.NamespaceAll} } - selectorsByGVK, err := convertToByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme) - if err != nil { - return nil, err + if len(opts.Namespaces) > 1 { + return newMultiNamespaceCache(config, opts) } - disableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(opts.UnsafeDisableDeepCopyByObject, opts.Scheme) - if err != nil { - return nil, err - } - transformByGVK, err := convertToByGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme) - if err != nil { - return nil, err - } - transformByObj := internal.TransformFuncByObjectFromMap(transformByGVK) - - internalSelectorsByGVK := internal.SelectorsByGVK{} - for gvk, selector := range selectorsByGVK { - internalSelectorsByGVK[gvk] = internal.Selector(selector) - } - - im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, internalSelectorsByGVK, disableDeepCopyByGVK, transformByObj) - return &informerCache{InformersMap: im}, nil -} -// BuilderWithOptions returns a Cache constructor that will build a cache -// honoring the options argument, this is useful to specify options like -// SelectorsByObject -// WARNING: If SelectorsByObject is specified, filtered out resources are not -// returned. -// WARNING: If UnsafeDisableDeepCopy is enabled, you must DeepCopy any object -// returned from cache get/list before mutating it. 
-func BuilderWithOptions(options Options) NewCacheFunc { - return func(config *rest.Config, inherited Options) (Cache, error) { - var err error - inherited, err = defaultOpts(config, inherited) - if err != nil { - return nil, err - } - options, err = defaultOpts(config, options) - if err != nil { - return nil, err - } - combined, err := options.inheritFrom(inherited) - if err != nil { - return nil, err - } - return New(config, *combined) - } -} - -func (options Options) inheritFrom(inherited Options) (*Options, error) { - var ( - combined Options - err error - ) - combined.Scheme = combineScheme(inherited.Scheme, options.Scheme) - combined.Mapper = selectMapper(inherited.Mapper, options.Mapper) - combined.Resync = selectResync(inherited.Resync, options.Resync) - combined.Namespace = selectNamespace(inherited.Namespace, options.Namespace) - combined.SelectorsByObject, combined.DefaultSelector, err = combineSelectors(inherited, options, combined.Scheme) - if err != nil { - return nil, err - } - combined.UnsafeDisableDeepCopyByObject, err = combineUnsafeDeepCopy(inherited, options, combined.Scheme) - if err != nil { - return nil, err - } - combined.TransformByObject, combined.DefaultTransform, err = combineTransforms(inherited, options, combined.Scheme) + opts, err := defaultOpts(config, opts) if err != nil { return nil, err } - return &combined, nil -} - -func combineScheme(schemes ...*runtime.Scheme) *runtime.Scheme { - var out *runtime.Scheme - for _, sch := range schemes { - if sch == nil { - continue - } - for gvk, t := range sch.AllKnownTypes() { - if out == nil { - out = runtime.NewScheme() - } - out.AddKnownTypeWithName(gvk, reflect.New(t).Interface().(runtime.Object)) - } - } - return out -} - -func selectMapper(def, override meta.RESTMapper) meta.RESTMapper { - if override != nil { - return override - } - return def -} - -func selectResync(def, override *time.Duration) *time.Duration { - if override != nil { - return override - } - return def -} - -func selectNamespace(def, override string) string { - if override != "" { - return override - } - return def -} -func combineSelectors(inherited, options Options, scheme *runtime.Scheme) (SelectorsByObject, ObjectSelector, error) { - // Selectors are combined via logical AND. - // - Combined label selector is a union of the selectors requirements from both sets of options. - // - Combined field selector uses fields.AndSelectors with the combined list of non-nil field selectors - // defined in both sets of options. 
- // - // There is a bunch of complexity here because we need to convert to SelectorsByGVK - // to be able to match keys between options and inherited and then convert back to SelectorsByObject - optionsSelectorsByGVK, err := convertToByGVK(options.SelectorsByObject, options.DefaultSelector, scheme) - if err != nil { - return nil, ObjectSelector{}, err - } - inheritedSelectorsByGVK, err := convertToByGVK(inherited.SelectorsByObject, inherited.DefaultSelector, inherited.Scheme) - if err != nil { - return nil, ObjectSelector{}, err - } - - for gvk, inheritedSelector := range inheritedSelectorsByGVK { - optionsSelectorsByGVK[gvk] = combineSelector(inheritedSelector, optionsSelectorsByGVK[gvk]) - } - return convertToByObject(optionsSelectorsByGVK, scheme) -} - -func combineSelector(selectors ...ObjectSelector) ObjectSelector { - ls := make([]labels.Selector, 0, len(selectors)) - fs := make([]fields.Selector, 0, len(selectors)) - for _, s := range selectors { - ls = append(ls, s.Label) - fs = append(fs, s.Field) - } - return ObjectSelector{ - Label: combineLabelSelectors(ls...), - Field: combineFieldSelectors(fs...), - } -} - -func combineLabelSelectors(ls ...labels.Selector) labels.Selector { - var combined labels.Selector - for _, l := range ls { - if l == nil { - continue - } - if combined == nil { - combined = labels.NewSelector() - } - reqs, _ := l.Requirements() - combined = combined.Add(reqs...) - } - return combined -} - -func combineFieldSelectors(fs ...fields.Selector) fields.Selector { - nonNil := fs[:0] - for _, f := range fs { - if f == nil { - continue - } - nonNil = append(nonNil, f) - } - if len(nonNil) == 0 { - return nil - } - if len(nonNil) == 1 { - return nonNil[0] - } - return fields.AndSelectors(nonNil...) -} - -func combineUnsafeDeepCopy(inherited, options Options, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) { - // UnsafeDisableDeepCopyByObject is combined via precedence. Only if a value for a particular GVK is unset - // in options will a value from inherited be used. - optionsDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(options.UnsafeDisableDeepCopyByObject, options.Scheme) + byGVK, err := convertToInformerOptsByGVK(opts.ByObject, opts.Scheme) if err != nil { return nil, err } - inheritedDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(inherited.UnsafeDisableDeepCopyByObject, inherited.Scheme) - if err != nil { - return nil, err - } - - for gvk, inheritedDeepCopy := range inheritedDisableDeepCopyByGVK { - if _, ok := optionsDisableDeepCopyByGVK[gvk]; !ok { - if optionsDisableDeepCopyByGVK == nil { - optionsDisableDeepCopyByGVK = map[schema.GroupVersionKind]bool{} - } - optionsDisableDeepCopyByGVK[gvk] = inheritedDeepCopy - } - } - return convertToDisableDeepCopyByObject(optionsDisableDeepCopyByGVK, scheme) + // Set the default selector and transform. 
+ byGVK[schema.GroupVersionKind{}] = internal.InformersOptsByGVK{ + Selector: internal.Selector{ + Label: opts.DefaultLabelSelector, + Field: opts.DefaultFieldSelector, + }, + Transform: opts.DefaultTransform, + UnsafeDisableDeepCopy: opts.UnsafeDisableDeepCopy, + } + + return &informerCache{ + scheme: opts.Scheme, + Informers: internal.NewInformers(config, &internal.InformersOpts{ + HTTPClient: opts.HTTPClient, + Scheme: opts.Scheme, + Mapper: opts.Mapper, + ResyncPeriod: *opts.SyncPeriod, + Namespace: opts.Namespaces[0], + ByGVK: byGVK, + }), + }, nil } -func combineTransforms(inherited, options Options, scheme *runtime.Scheme) (TransformByObject, toolscache.TransformFunc, error) { - // Transform functions are combined via chaining. If both inherited and options define a transform - // function, the transform function from inherited will be called first, and the transform function from - // options will be called second. - optionsTransformByGVK, err := convertToByGVK(options.TransformByObject, options.DefaultTransform, options.Scheme) - if err != nil { - return nil, nil, err - } - inheritedTransformByGVK, err := convertToByGVK(inherited.TransformByObject, inherited.DefaultTransform, inherited.Scheme) - if err != nil { - return nil, nil, err +func defaultOpts(config *rest.Config, opts Options) (Options, error) { + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() } - for gvk, inheritedTransform := range inheritedTransformByGVK { - if optionsTransformByGVK == nil { - optionsTransformByGVK = map[schema.GroupVersionKind]toolscache.TransformFunc{} - } - optionsTransformByGVK[gvk] = combineTransform(inheritedTransform, optionsTransformByGVK[gvk]) - } - return convertToByObject(optionsTransformByGVK, scheme) -} + logger := log.WithName("setup") -func combineTransform(inherited, current toolscache.TransformFunc) toolscache.TransformFunc { - if inherited == nil { - return current - } - if current == nil { - return inherited - } - return func(in interface{}) (interface{}, error) { - mid, err := inherited(in) + // Use the rest HTTP client for the provided config if unset + if opts.HTTPClient == nil { + var err error + opts.HTTPClient, err = rest.HTTPClientFor(config) if err != nil { - return nil, err + logger.Error(err, "Failed to get HTTP client") + return opts, fmt.Errorf("could not create HTTP client from config: %w", err) } - return current(mid) } -} -func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Use the default Kubernetes Scheme if unset if opts.Scheme == nil { opts.Scheme = scheme.Scheme @@ -410,108 +261,38 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Construct a new Mapper if unset if opts.Mapper == nil { var err error - opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config) + opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config, opts.HTTPClient) if err != nil { - log.WithName("setup").Error(err, "Failed to get API Group-Resources") - return opts, fmt.Errorf("could not create RESTMapper from config") + logger.Error(err, "Failed to get API Group-Resources") + return opts, fmt.Errorf("could not create RESTMapper from config: %w", err) } } // Default the resync period to 10 hours if unset - if opts.Resync == nil { - opts.Resync = &defaultResyncTime + if opts.SyncPeriod == nil { + opts.SyncPeriod = &defaultSyncPeriod } return opts, nil } -func convertToByGVK[T any](byObject map[client.Object]T, def T, scheme *runtime.Scheme) (map[schema.GroupVersionKind]T, error) 
{ - byGVK := map[schema.GroupVersionKind]T{} - for object, value := range byObject { +func convertToInformerOptsByGVK(in map[client.Object]ByObject, scheme *runtime.Scheme) (map[schema.GroupVersionKind]internal.InformersOptsByGVK, error) { + out := map[schema.GroupVersionKind]internal.InformersOptsByGVK{} + for object, byObject := range in { gvk, err := apiutil.GVKForObject(object, scheme) if err != nil { return nil, err } - byGVK[gvk] = value - } - byGVK[schema.GroupVersionKind{}] = def - return byGVK, nil -} - -func convertToByObject[T any](byGVK map[schema.GroupVersionKind]T, scheme *runtime.Scheme) (map[client.Object]T, T, error) { - var byObject map[client.Object]T - def := byGVK[schema.GroupVersionKind{}] - for gvk, value := range byGVK { - if gvk == (schema.GroupVersionKind{}) { - continue - } - obj, err := scheme.New(gvk) - if err != nil { - return nil, def, err - } - cObj, ok := obj.(client.Object) - if !ok { - return nil, def, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk) - } - cObj.GetObjectKind().SetGroupVersionKind(gvk) - if byObject == nil { - byObject = map[client.Object]T{} - } - byObject[cObj] = value - } - return byObject, def, nil -} - -// DisableDeepCopyByObject associate a client.Object's GVK to disable DeepCopy during get or list from cache. -type DisableDeepCopyByObject map[client.Object]bool - -var _ client.Object = &ObjectAll{} - -// ObjectAll is the argument to represent all objects' types. -type ObjectAll struct { - client.Object -} - -func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObject, scheme *runtime.Scheme) (internal.DisableDeepCopyByGVK, error) { - disableDeepCopyByGVK := internal.DisableDeepCopyByGVK{} - for obj, disable := range disableDeepCopyByObject { - switch obj.(type) { - case ObjectAll, *ObjectAll: - disableDeepCopyByGVK[internal.GroupVersionKindAll] = disable - default: - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return nil, err - } - disableDeepCopyByGVK[gvk] = disable + if _, ok := out[gvk]; ok { + return nil, fmt.Errorf("duplicate cache options for GVK %v, cache.Options.ByObject has multiple types with the same GroupVersionKind", gvk) } - } - return disableDeepCopyByGVK, nil -} - -func convertToDisableDeepCopyByObject(byGVK internal.DisableDeepCopyByGVK, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) { - var byObject DisableDeepCopyByObject - for gvk, value := range byGVK { - if byObject == nil { - byObject = DisableDeepCopyByObject{} - } - if gvk == (schema.GroupVersionKind{}) { - byObject[ObjectAll{}] = value - continue - } - obj, err := scheme.New(gvk) - if err != nil { - return nil, err - } - cObj, ok := obj.(client.Object) - if !ok { - return nil, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk) + out[gvk] = internal.InformersOptsByGVK{ + Selector: internal.Selector{ + Field: byObject.Field, + Label: byObject.Label, + }, + Transform: byObject.Transform, + UnsafeDisableDeepCopy: byObject.UnsafeDisableDeepCopy, } - - byObject[cObj] = value } - return byObject, nil + return out, nil } - -// TransformByObject associate a client.Object's GVK to a transformer function -// to be applied when storing the object into the cache. 
-type TransformByObject map[client.Object]toolscache.TransformFunc diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index 08e4e6df..771244d5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -19,10 +19,10 @@ package cache import ( "context" "fmt" - "reflect" "strings" apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -45,19 +45,21 @@ func (*ErrCacheNotStarted) Error() string { return "the cache is not started, can not read objects" } -// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. +// informerCache is a Kubernetes Object cache populated from internal.Informers. +// informerCache wraps internal.Informers. type informerCache struct { - *internal.InformersMap + scheme *runtime.Scheme + *internal.Informers } // Get implements Reader. -func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { - gvk, err := apiutil.GVKForObject(out, ip.Scheme) +func (ic *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(out, ic.scheme) if err != nil { return err } - started, cache, err := ip.InformersMap.Get(ctx, gvk, out) + started, cache, err := ic.Informers.Get(ctx, gvk, out) if err != nil { return err } @@ -69,13 +71,13 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out clie } // List implements Reader. -func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { - gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) +func (ic *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { + gvk, cacheTypeObj, err := ic.objectTypeForListObject(out) if err != nil { return err } - started, cache, err := ip.InformersMap.Get(ctx, *gvk, cacheTypeObj) + started, cache, err := ic.Informers.Get(ctx, *gvk, cacheTypeObj) if err != nil { return err } @@ -90,54 +92,46 @@ func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts . // objectTypeForListObject tries to find the runtime.Object and associated GVK // for a single object corresponding to the passed-in list type. We need them // because they are used as cache map key. -func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { - gvk, err := apiutil.GVKForObject(list, ip.Scheme) +func (ic *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { + gvk, err := apiutil.GVKForObject(list, ic.scheme) if err != nil { return nil, nil, err } - // we need the non-list GVK, so chop off the "List" from the end of the kind - if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) { - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] - } + // We need the non-list GVK, so chop off the "List" from the end of the kind. + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - _, isUnstructured := list.(*unstructured.UnstructuredList) - var cacheTypeObj runtime.Object - if isUnstructured { + // Handle unstructured.UnstructuredList. 
+ if _, isUnstructured := list.(runtime.Unstructured); isUnstructured { u := &unstructured.Unstructured{} u.SetGroupVersionKind(gvk) - cacheTypeObj = u - } else { - itemsPtr, err := apimeta.GetItemsPtr(list) - if err != nil { - return nil, nil, err - } - // http://knowyourmeme.com/memes/this-is-fine - elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem() - if elemType.Kind() != reflect.Ptr { - elemType = reflect.PtrTo(elemType) - } - - cacheTypeValue := reflect.Zero(elemType) - var ok bool - cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object) - if !ok { - return nil, nil, fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", list, cacheTypeValue.Interface()) - } + return &gvk, u, nil + } + // Handle metav1.PartialObjectMetadataList. + if _, isPartialObjectMetadata := list.(*metav1.PartialObjectMetadataList); isPartialObjectMetadata { + pom := &metav1.PartialObjectMetadata{} + pom.SetGroupVersionKind(gvk) + return &gvk, pom, nil } + // Any other list type should have a corresponding non-list type registered + // in the scheme. Use that to create a new instance of the non-list type. + cacheTypeObj, err := ic.scheme.New(gvk) + if err != nil { + return nil, nil, err + } return &gvk, cacheTypeObj, nil } // GetInformerForKind returns the informer for the GroupVersionKind. -func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { +func (ic *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { // Map the gvk to an object - obj, err := ip.Scheme.New(gvk) + obj, err := ic.scheme.New(gvk) if err != nil { return nil, err } - _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + _, i, err := ic.Informers.Get(ctx, gvk, obj) if err != nil { return nil, err } @@ -145,13 +139,13 @@ func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.Grou } // GetInformer returns the informer for the obj. -func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { - gvk, err := apiutil.GVKForObject(obj, ip.Scheme) +func (ic *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + gvk, err := apiutil.GVKForObject(obj, ic.scheme) if err != nil { return nil, err } - _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + _, i, err := ic.Informers.Get(ctx, gvk, obj) if err != nil { return nil, err } @@ -160,7 +154,7 @@ func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (In // NeedLeaderElection implements the LeaderElectionRunnable interface // to indicate that this can be started without requiring the leader lock. -func (ip *informerCache) NeedLeaderElection() bool { +func (ic *informerCache) NeedLeaderElection() bool { return false } @@ -169,8 +163,8 @@ func (ip *informerCache) NeedLeaderElection() bool { // to List. For one-to-one compatibility with "normal" field selectors, only return one value. // The values may be anything. They will automatically be prefixed with the namespace of the // given object, if present. The objects passed are guaranteed to be objects of the correct type. 
-func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - informer, err := ip.GetInformer(ctx, obj) +func (ic *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + informer, err := ic.GetInformer(ctx, obj) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index f78b0833..3c8355bb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -27,9 +27,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/cache" - "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" ) // CacheReader is a client.Reader. @@ -147,7 +147,7 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli } obj, isObj := item.(runtime.Object) if !isObj { - return fmt.Errorf("cache contained %T, which is not an Object", obj) + return fmt.Errorf("cache contained %T, which is not an Object", item) } meta, err := apimeta.Accessor(obj) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go deleted file mode 100644 index 27f46e32..00000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "context" - "time" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" -) - -// InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. -// It uses a standard parameter codec constructed based on the given generated Scheme. -type InformersMap struct { - // we abstract over the details of structured/unstructured/metadata with the specificInformerMaps - // TODO(directxman12): genericize this over different projections now that we have 3 different maps - - structured *specificInformersMap - unstructured *specificInformersMap - metadata *specificInformersMap - - // Scheme maps runtime.Objects to GroupVersionKinds - Scheme *runtime.Scheme -} - -// NewInformersMap creates a new InformersMap that can create informers for -// both structured and unstructured objects. 
-func NewInformersMap(config *rest.Config, - scheme *runtime.Scheme, - mapper meta.RESTMapper, - resync time.Duration, - namespace string, - selectors SelectorsByGVK, - disableDeepCopy DisableDeepCopyByGVK, - transformers TransformFuncByObject, -) *InformersMap { - return &InformersMap{ - structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - - Scheme: scheme, - } -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the context. -func (m *InformersMap) Start(ctx context.Context) error { - go m.structured.Start(ctx) - go m.unstructured.Start(ctx) - go m.metadata.Start(ctx) - <-ctx.Done() - return nil -} - -// WaitForCacheSync waits until all the caches have been started and synced. -func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool { - syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...) - syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...) - syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...) - - if !m.structured.waitForStarted(ctx) { - return false - } - if !m.unstructured.waitForStarted(ctx) { - return false - } - if !m.metadata.waitForStarted(ctx) { - return false - } - return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...) -} - -// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns -// the Informer from the map. -func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { - switch obj.(type) { - case *unstructured.Unstructured: - return m.unstructured.Get(ctx, gvk, obj) - case *unstructured.UnstructuredList: - return m.unstructured.Get(ctx, gvk, obj) - case *metav1.PartialObjectMetadata: - return m.metadata.Get(ctx, gvk, obj) - case *metav1.PartialObjectMetadataList: - return m.metadata.Get(ctx, gvk, obj) - default: - return m.structured.Get(ctx, gvk, obj) - } -} - -// newStructuredInformersMap creates a new InformersMap for structured objects. -func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch) -} - -// newUnstructuredInformersMap creates a new InformersMap for unstructured objects. -func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch) -} - -// newMetadataInformersMap creates a new InformersMap for metadata-only objects. 
-func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go deleted file mode 100644 index 54bd7eec..00000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import "k8s.io/apimachinery/pkg/runtime/schema" - -// GroupVersionKindAll is the argument to represent all GroupVersionKind types. -var GroupVersionKindAll = schema.GroupVersionKind{} - -// DisableDeepCopyByGVK associate a GroupVersionKind to disable DeepCopy during get or list from cache. -type DisableDeepCopyByGVK map[schema.GroupVersionKind]bool - -// IsDisabled returns whether a GroupVersionKind is disabled DeepCopy. -func (disableByGVK DisableDeepCopyByGVK) IsDisabled(gvk schema.GroupVersionKind) bool { - if d, ok := disableByGVK[gvk]; ok { - return d - } else if d, ok = disableByGVK[GroupVersionKindAll]; ok { - return d - } - return false -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go new file mode 100644 index 00000000..09e01111 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go @@ -0,0 +1,560 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// InformersOpts configures an InformerMap. 
+type InformersOpts struct { + HTTPClient *http.Client + Scheme *runtime.Scheme + Mapper meta.RESTMapper + ResyncPeriod time.Duration + Namespace string + ByGVK map[schema.GroupVersionKind]InformersOptsByGVK +} + +// InformersOptsByGVK configured additional by group version kind (or object) +// in an InformerMap. +type InformersOptsByGVK struct { + Selector Selector + Transform cache.TransformFunc + UnsafeDisableDeepCopy *bool +} + +// NewInformers creates a new InformersMap that can create informers under the hood. +func NewInformers(config *rest.Config, options *InformersOpts) *Informers { + return &Informers{ + config: config, + httpClient: options.HTTPClient, + scheme: options.Scheme, + mapper: options.Mapper, + tracker: tracker{ + Structured: make(map[schema.GroupVersionKind]*Cache), + Unstructured: make(map[schema.GroupVersionKind]*Cache), + Metadata: make(map[schema.GroupVersionKind]*Cache), + }, + codecs: serializer.NewCodecFactory(options.Scheme), + paramCodec: runtime.NewParameterCodec(options.Scheme), + resync: options.ResyncPeriod, + startWait: make(chan struct{}), + namespace: options.Namespace, + byGVK: options.ByGVK, + } +} + +// Cache contains the cached data for an Cache. +type Cache struct { + // Informer is the cached informer + Informer cache.SharedIndexInformer + + // CacheReader wraps Informer and implements the CacheReader interface for a single type + Reader CacheReader +} + +type tracker struct { + Structured map[schema.GroupVersionKind]*Cache + Unstructured map[schema.GroupVersionKind]*Cache + Metadata map[schema.GroupVersionKind]*Cache +} + +// Informers create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. +type Informers struct { + // httpClient is used to create a new REST client + httpClient *http.Client + + // scheme maps runtime.Objects to GroupVersionKinds + scheme *runtime.Scheme + + // config is used to talk to the apiserver + config *rest.Config + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // tracker tracks informers keyed by their type and groupVersionKind + tracker tracker + + // codecs is used to create a new REST client + codecs serializer.CodecFactory + + // paramCodec is used by list and watch + paramCodec runtime.ParameterCodec + + // resync is the base frequency the informers are resynced + // a 10 percent jitter will be added to the resync period between informers + // so that all informers will not send list requests simultaneously. + resync time.Duration + + // mu guards access to the map + mu sync.RWMutex + + // started is true if the informers have been started + started bool + + // startWait is a channel that is closed after the + // informer has been started. 
+ startWait chan struct{} + + // waitGroup is the wait group that is used to wait for all informers to stop + waitGroup sync.WaitGroup + + // stopped is true if the informers have been stopped + stopped bool + + // ctx is the context to stop informers + ctx context.Context + + // namespace is the namespace that all ListWatches are restricted to + // default or empty string means all namespaces + namespace string + + byGVK map[schema.GroupVersionKind]InformersOptsByGVK +} + +func (ip *Informers) getSelector(gvk schema.GroupVersionKind) Selector { + if ip.byGVK == nil { + return Selector{} + } + if res, ok := ip.byGVK[gvk]; ok { + return res.Selector + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok { + return res.Selector + } + return Selector{} +} + +func (ip *Informers) getTransform(gvk schema.GroupVersionKind) cache.TransformFunc { + if ip.byGVK == nil { + return nil + } + if res, ok := ip.byGVK[gvk]; ok { + return res.Transform + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok { + return res.Transform + } + return nil +} + +func (ip *Informers) getDisableDeepCopy(gvk schema.GroupVersionKind) bool { + if ip.byGVK == nil { + return false + } + if res, ok := ip.byGVK[gvk]; ok && res.UnsafeDisableDeepCopy != nil { + return *res.UnsafeDisableDeepCopy + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok && res.UnsafeDisableDeepCopy != nil { + return *res.UnsafeDisableDeepCopy + } + return false +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +// It doesn't return start because it can't return an error, and it's not a runnable directly. +func (ip *Informers) Start(ctx context.Context) error { + func() { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Set the context so it can be passed to informers that are added later + ip.ctx = ctx + + // Start each informer + for _, i := range ip.tracker.Structured { + ip.startInformerLocked(i.Informer) + } + for _, i := range ip.tracker.Unstructured { + ip.startInformerLocked(i.Informer) + } + for _, i := range ip.tracker.Metadata { + ip.startInformerLocked(i.Informer) + } + + // Set started to true so we immediately start any informers added later. + ip.started = true + close(ip.startWait) + }() + <-ctx.Done() // Block until the context is done + ip.mu.Lock() + ip.stopped = true // Set stopped to true so we don't start any new informers + ip.mu.Unlock() + ip.waitGroup.Wait() // Block until all informers have stopped + return nil +} + +func (ip *Informers) startInformerLocked(informer cache.SharedIndexInformer) { + // Don't start the informer in case we are already waiting for the items in + // the waitGroup to finish, since waitGroups don't support waiting and adding + // at the same time. + if ip.stopped { + return + } + + ip.waitGroup.Add(1) + go func() { + defer ip.waitGroup.Done() + informer.Run(ip.ctx.Done()) + }() +} + +func (ip *Informers) waitForStarted(ctx context.Context) bool { + select { + case <-ip.startWait: + return true + case <-ctx.Done(): + return false + } +} + +// getHasSyncedFuncs returns all the HasSynced functions for the informers in this map. 
+func (ip *Informers) getHasSyncedFuncs() []cache.InformerSynced { + ip.mu.RLock() + defer ip.mu.RUnlock() + + res := make([]cache.InformerSynced, 0, + len(ip.tracker.Structured)+len(ip.tracker.Unstructured)+len(ip.tracker.Metadata), + ) + for _, i := range ip.tracker.Structured { + res = append(res, i.Informer.HasSynced) + } + for _, i := range ip.tracker.Unstructured { + res = append(res, i.Informer.HasSynced) + } + for _, i := range ip.tracker.Metadata { + res = append(res, i.Informer.HasSynced) + } + return res +} + +// WaitForCacheSync waits until all the caches have been started and synced. +func (ip *Informers) WaitForCacheSync(ctx context.Context) bool { + if !ip.waitForStarted(ctx) { + return false + } + return cache.WaitForCacheSync(ctx.Done(), ip.getHasSyncedFuncs()...) +} + +func (ip *Informers) get(gvk schema.GroupVersionKind, obj runtime.Object) (res *Cache, started bool, ok bool) { + ip.mu.RLock() + defer ip.mu.RUnlock() + i, ok := ip.informersByType(obj)[gvk] + return i, ip.started, ok +} + +// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns +// the Informer from the map. +func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *Cache, error) { + // Return the informer if it is found + i, started, ok := ip.get(gvk, obj) + if !ok { + var err error + if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { + return started, nil, err + } + } + + if started && !i.Informer.HasSynced() { + // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. + if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { + return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) + } + } + + return started, i, nil +} + +func (ip *Informers) informersByType(obj runtime.Object) map[schema.GroupVersionKind]*Cache { + switch obj.(type) { + case runtime.Unstructured: + return ip.tracker.Unstructured + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + return ip.tracker.Metadata + default: + return ip.tracker.Structured + } +} + +func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*Cache, bool, error) { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Check the cache to see if we already have an Informer. If we do, return the Informer. + // This is for the case where 2 routines tried to get the informer when it wasn't in the map + // so neither returned early, but the first one created it. + if i, ok := ip.informersByType(obj)[gvk]; ok { + return i, ip.started, nil + } + + // Create a NewSharedIndexInformer and add it to the map. 
+ listWatcher, err := ip.makeListWatcher(gvk, obj) + if err != nil { + return nil, false, err + } + sharedIndexInformer := cache.NewSharedIndexInformer(&cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.getSelector(gvk).ApplyToList(&opts) + return listWatcher.ListFunc(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.getSelector(gvk).ApplyToList(&opts) + opts.Watch = true // Watch needs to be set to true separately + return listWatcher.WatchFunc(opts) + }, + }, obj, calculateResyncPeriod(ip.resync), cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }) + + // Check to see if there is a transformer for this gvk + if err := sharedIndexInformer.SetTransform(ip.getTransform(gvk)); err != nil { + return nil, false, err + } + + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, false, err + } + + // Create the new entry and set it in the map. + i := &Cache{ + Informer: sharedIndexInformer, + Reader: CacheReader{ + indexer: sharedIndexInformer.GetIndexer(), + groupVersionKind: gvk, + scopeName: mapping.Scope.Name(), + disableDeepCopy: ip.getDisableDeepCopy(gvk), + }, + } + ip.informersByType(obj)[gvk] = i + + // Start the informer in case the InformersMap has started, otherwise it will be + // started when the InformersMap starts. + if ip.started { + ip.startInformerLocked(i.Informer) + } + return i, ip.started, nil +} + +func (ip *Informers) makeListWatcher(gvk schema.GroupVersionKind, obj runtime.Object) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // Figure out if the GVK we're dealing with is global, or namespace scoped. + var namespace string + if mapping.Scope.Name() == meta.RESTScopeNameNamespace { + namespace = restrictNamespaceBySelector(ip.namespace, ip.getSelector(gvk)) + } + + switch obj.(type) { + // + // Unstructured + // + case runtime.Unstructured: + // If the rest configuration has a negotiated serializer passed in, + // we should remove it and use the one that the dynamic client sets for us. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + dynamicClient, err := dynamic.NewForConfigAndClient(cfg, ip.httpClient) + if err != nil { + return nil, err + } + resources := dynamicClient.Resource(mapping.Resource) + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + if namespace != "" { + return resources.Namespace(namespace).List(ip.ctx, opts) + } + return resources.List(ip.ctx, opts) + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + if namespace != "" { + return resources.Namespace(namespace).Watch(ip.ctx, opts) + } + return resources.Watch(ip.ctx, opts) + }, + }, nil + // + // Metadata + // + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + // Always clear the negotiated serializer and use the one + // set from the metadata client. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + + // Grab the metadata metadataClient. 
+ metadataClient, err := metadata.NewForConfigAndClient(cfg, ip.httpClient) + if err != nil { + return nil, err + } + resources := metadataClient.Resource(mapping.Resource) + + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + var ( + list *metav1.PartialObjectMetadataList + err error + ) + if namespace != "" { + list, err = resources.Namespace(namespace).List(ip.ctx, opts) + } else { + list, err = resources.List(ip.ctx, opts) + } + if list != nil { + for i := range list.Items { + list.Items[i].SetGroupVersionKind(gvk) + } + } + return list, err + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watcher watch.Interface, err error) { + if namespace != "" { + watcher, err = resources.Namespace(namespace).Watch(ip.ctx, opts) + } else { + watcher, err = resources.Watch(ip.ctx, opts) + } + if err != nil { + return nil, err + } + return newGVKFixupWatcher(gvk, watcher), nil + }, + }, nil + // + // Structured. + // + default: + client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs, ip.httpClient) + if err != nil { + return nil, err + } + listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") + listObj, err := ip.scheme.New(listGVK) + if err != nil { + return nil, err + } + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + // Build the request. + req := client.Get().Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec) + if namespace != "" { + req.Namespace(namespace) + } + + // Create the resulting object, and execute the request. + res := listObj.DeepCopyObject() + if err := req.Do(ip.ctx).Into(res); err != nil { + return nil, err + } + return res, nil + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + // Build the request. + req := client.Get().Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec) + if namespace != "" { + req.Namespace(namespace) + } + // Call the watch. + return req.Watch(ip.ctx) + }, + }, nil + } +} + +// newGVKFixupWatcher adds a wrapper that preserves the GVK information when +// events come in. +// +// This works around a bug where GVK information is not passed into mapping +// functions when using the OnlyMetadata option in the builder. +// This issue is most likely caused by kubernetes/kubernetes#80609. +// See kubernetes-sigs/controller-runtime#1484. +// +// This was originally implemented as a cache.ResourceEventHandler wrapper but +// that contained a data race which was resolved by setting the GVK in a watch +// wrapper, before the objects are written to the cache. +// See kubernetes-sigs/controller-runtime#1650. +// +// The original watch wrapper was found to be incompatible with +// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a +// watch.Filter which is compatible. +// See kubernetes-sigs/controller-runtime#1789. +func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { + return watch.Filter( + watcher, + func(in watch.Event) (watch.Event, bool) { + in.Object.GetObjectKind().SetGroupVersionKind(gvk) + return in, true + }, + ) +} + +// calculateResyncPeriod returns a duration based on the desired input +// this is so that multiple controllers don't get into lock-step and all +// hammer the apiserver with list requests simultaneously. 
+func calculateResyncPeriod(resync time.Duration) time.Duration { + // the factor will fall into [0.9, 1.1) + factor := rand.Float64()/5.0 + 0.9 //nolint:gosec + return time.Duration(float64(resync.Nanoseconds()) * factor) +} + +// restrictNamespaceBySelector returns either a global restriction for all ListWatches +// if not default/empty, or the namespace that a ListWatch for the specific resource +// is restricted to, based on a specified field selector for metadata.namespace field. +func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { + if namespaceOpt != "" { + // namespace is already restricted + return namespaceOpt + } + fieldSelector := s.Field + if fieldSelector == nil || fieldSelector.Empty() { + return "" + } + // check whether a selector includes the namespace field + value, found := fieldSelector.RequiresExactMatch("metadata.namespace") + if found { + return value + } + return "" +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go deleted file mode 100644 index 1524d231..00000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ /dev/null @@ -1,480 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "context" - "fmt" - "math/rand" - "sync" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/metadata" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// clientListWatcherFunc knows how to create a ListWatcher. -type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) - -// newSpecificInformersMap returns a new specificInformersMap (like -// the generical InformersMap, except that it doesn't implement WaitForCacheSync). 
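// ---- Editorial aside (not part of the vendored patch) ----
// A tiny sketch of the resync jitter computed by calculateResyncPeriod above:
// each informer's period is scaled by a random factor in [0.9, 1.1) so list
// requests from many controllers do not line up.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func jitteredResync(resync time.Duration) time.Duration {
	factor := rand.Float64()/5.0 + 0.9 // uniform in [0.9, 1.1)
	return time.Duration(float64(resync.Nanoseconds()) * factor)
}

func main() {
	base := 10 * time.Hour
	for i := 0; i < 3; i++ {
		fmt.Println(jitteredResync(base)) // somewhere between 9h and 11h
	}
}
// ---- end editorial aside ----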
-func newSpecificInformersMap(config *rest.Config, - scheme *runtime.Scheme, - mapper meta.RESTMapper, - resync time.Duration, - namespace string, - selectors SelectorsByGVK, - disableDeepCopy DisableDeepCopyByGVK, - transformers TransformFuncByObject, - createListWatcher createListWatcherFunc, -) *specificInformersMap { - ip := &specificInformersMap{ - config: config, - Scheme: scheme, - mapper: mapper, - informersByGVK: make(map[schema.GroupVersionKind]*MapEntry), - codecs: serializer.NewCodecFactory(scheme), - paramCodec: runtime.NewParameterCodec(scheme), - resync: resync, - startWait: make(chan struct{}), - createListWatcher: createListWatcher, - namespace: namespace, - selectors: selectors.forGVK, - disableDeepCopy: disableDeepCopy, - transformers: transformers, - } - return ip -} - -// MapEntry contains the cached data for an Informer. -type MapEntry struct { - // Informer is the cached informer - Informer cache.SharedIndexInformer - - // CacheReader wraps Informer and implements the CacheReader interface for a single type - Reader CacheReader -} - -// specificInformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. -// It uses a standard parameter codec constructed based on the given generated Scheme. -type specificInformersMap struct { - // Scheme maps runtime.Objects to GroupVersionKinds - Scheme *runtime.Scheme - - // config is used to talk to the apiserver - config *rest.Config - - // mapper maps GroupVersionKinds to Resources - mapper meta.RESTMapper - - // informersByGVK is the cache of informers keyed by groupVersionKind - informersByGVK map[schema.GroupVersionKind]*MapEntry - - // codecs is used to create a new REST client - codecs serializer.CodecFactory - - // paramCodec is used by list and watch - paramCodec runtime.ParameterCodec - - // stop is the stop channel to stop informers - stop <-chan struct{} - - // resync is the base frequency the informers are resynced - // a 10 percent jitter will be added to the resync period between informers - // so that all informers will not send list requests simultaneously. - resync time.Duration - - // mu guards access to the map - mu sync.RWMutex - - // start is true if the informers have been started - started bool - - // startWait is a channel that is closed after the - // informer has been started. - startWait chan struct{} - - // createClient knows how to create a client and a list object, - // and allows for abstracting over the particulars of structured vs - // unstructured objects. - createListWatcher createListWatcherFunc - - // namespace is the namespace that all ListWatches are restricted to - // default or empty string means all namespaces - namespace string - - // selectors are the label or field selectors that will be added to the - // ListWatch ListOptions. - selectors func(gvk schema.GroupVersionKind) Selector - - // disableDeepCopy indicates not to deep copy objects during get or list objects. - disableDeepCopy DisableDeepCopyByGVK - - // transform funcs are applied to objects before they are committed to the cache - transformers TransformFuncByObject -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the context. -// It doesn't return start because it can't return an error, and it's not a runnable directly. 
-func (ip *specificInformersMap) Start(ctx context.Context) { - func() { - ip.mu.Lock() - defer ip.mu.Unlock() - - // Set the stop channel so it can be passed to informers that are added later - ip.stop = ctx.Done() - - // Start each informer - for _, informer := range ip.informersByGVK { - go informer.Informer.Run(ctx.Done()) - } - - // Set started to true so we immediately start any informers added later. - ip.started = true - close(ip.startWait) - }() - <-ctx.Done() -} - -func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool { - select { - case <-ip.startWait: - return true - case <-ctx.Done(): - return false - } -} - -// HasSyncedFuncs returns all the HasSynced functions for the informers in this map. -func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { - ip.mu.RLock() - defer ip.mu.RUnlock() - syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK)) - for _, informer := range ip.informersByGVK { - syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced) - } - return syncedFuncs -} - -// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns -// the Informer from the map. -func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { - // Return the informer if it is found - i, started, ok := func() (*MapEntry, bool, bool) { - ip.mu.RLock() - defer ip.mu.RUnlock() - i, ok := ip.informersByGVK[gvk] - return i, ip.started, ok - }() - - if !ok { - var err error - if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { - return started, nil, err - } - } - - if started && !i.Informer.HasSynced() { - // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. - if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { - return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) - } - } - - return started, i, nil -} - -func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { - ip.mu.Lock() - defer ip.mu.Unlock() - - // Check the cache to see if we already have an Informer. If we do, return the Informer. - // This is for the case where 2 routines tried to get the informer when it wasn't in the map - // so neither returned early, but the first one created it. - if i, ok := ip.informersByGVK[gvk]; ok { - return i, ip.started, nil - } - - // Create a NewSharedIndexInformer and add it to the map. - var lw *cache.ListWatch - lw, err := ip.createListWatcher(gvk, ip) - if err != nil { - return nil, false, err - } - ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{ - cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, - }) - - // Check to see if there is a transformer for this gvk - if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil { - return nil, false, err - } - - rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, false, err - } - - i := &MapEntry{ - Informer: ni, - Reader: CacheReader{ - indexer: ni.GetIndexer(), - groupVersionKind: gvk, - scopeName: rm.Scope.Name(), - disableDeepCopy: ip.disableDeepCopy.IsDisabled(gvk), - }, - } - ip.informersByGVK[gvk] = i - - // Start the Informer if need by - // TODO(seans): write thorough tests and document what happens here - can you add indexers? - // can you add eventhandlers? 
- if ip.started { - go i.Informer.Run(ip.stop) - } - return i, ip.started, nil -} - -// newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer. -func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs) - if err != nil { - return nil, err - } - listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") - listObj, err := ip.Scheme.New(listGVK) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. - ctx := context.TODO() - // Create a new ListWatch for the obj - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - res := listObj.DeepCopyObject() - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) - return res, err - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) - }, - }, nil -} - -func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - // If the rest configuration has a negotiated serializer passed in, - // we should remove it and use the one that the dynamic client sets for us. - cfg := rest.CopyConfig(ip.config) - cfg.NegotiatedSerializer = nil - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. 
- ctx := context.TODO() - // Create a new ListWatch for the obj - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) - } - return dynamicClient.Resource(mapping.Resource).List(ctx, opts) - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) - } - return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts) - }, - }, nil -} - -func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - // Always clear the negotiated serializer and use the one - // set from the metadata client. - cfg := rest.CopyConfig(ip.config) - cfg.NegotiatedSerializer = nil - - // grab the metadata client - client, err := metadata.NewForConfig(cfg) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. - ctx := context.TODO() - - // create the relevant listwatch - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - - var ( - list *metav1.PartialObjectMetadataList - err error - ) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - list, err = client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) - } else { - list, err = client.Resource(mapping.Resource).List(ctx, opts) - } - if list != nil { - for i := range list.Items { - list.Items[i].SetGroupVersionKind(gvk) - } - } - return list, err - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - - var ( - watcher watch.Interface - err error - ) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - watcher, err = client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) - } else { - watcher, err = client.Resource(mapping.Resource).Watch(ctx, opts) - } - if watcher != nil { - watcher = newGVKFixupWatcher(gvk, watcher) - } - return watcher, err - }, - }, nil -} - -// newGVKFixupWatcher adds a wrapper that preserves the GVK information when -// events come in. -// -// This works around a bug where GVK information is not passed into mapping -// functions when using the OnlyMetadata option in the builder. -// This issue is most likely caused by kubernetes/kubernetes#80609. 
-// See kubernetes-sigs/controller-runtime#1484. -// -// This was originally implemented as a cache.ResourceEventHandler wrapper but -// that contained a data race which was resolved by setting the GVK in a watch -// wrapper, before the objects are written to the cache. -// See kubernetes-sigs/controller-runtime#1650. -// -// The original watch wrapper was found to be incompatible with -// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a -// watch.Filter which is compatible. -// See kubernetes-sigs/controller-runtime#1789. -func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { - return watch.Filter( - watcher, - func(in watch.Event) (watch.Event, bool) { - in.Object.GetObjectKind().SetGroupVersionKind(gvk) - return in, true - }, - ) -} - -// resyncPeriod returns a function which generates a duration each time it is -// invoked; this is so that multiple controllers don't get into lock-step and all -// hammer the apiserver with list requests simultaneously. -func resyncPeriod(resync time.Duration) func() time.Duration { - return func() time.Duration { - // the factor will fall into [0.9, 1.1) - factor := rand.Float64()/5.0 + 0.9 //nolint:gosec - return time.Duration(float64(resync.Nanoseconds()) * factor) - } -} - -// restrictNamespaceBySelector returns either a global restriction for all ListWatches -// if not default/empty, or the namespace that a ListWatch for the specific resource -// is restricted to, based on a specified field selector for metadata.namespace field. -func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { - if namespaceOpt != "" { - // namespace is already restricted - return namespaceOpt - } - fieldSelector := s.Field - if fieldSelector == nil || fieldSelector.Empty() { - return "" - } - // check whether a selector includes the namespace field - value, found := fieldSelector.RequiresExactMatch("metadata.namespace") - if found { - return value - } - return "" -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go index 4eff32fb..c674379b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go @@ -20,23 +20,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// SelectorsByGVK associate a GroupVersionKind to a field/label selector. -type SelectorsByGVK map[schema.GroupVersionKind]Selector - -func (s SelectorsByGVK) forGVK(gvk schema.GroupVersionKind) Selector { - if specific, found := s[gvk]; found { - return specific - } - if defaultSelector, found := s[schema.GroupVersionKind{}]; found { - return defaultSelector - } - - return Selector{} -} - // Selector specify the label/field selector to fill in ListOptions. 
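// ---- Editorial aside (not part of the vendored patch) ----
// A short sketch of the field-selector check that restrictNamespaceBySelector
// performs: when no cache-level namespace is set, an exact metadata.namespace
// field selector can still pin the ListWatch to a single namespace.
// namespaceFromFieldSelector is an illustrative name.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func namespaceFromFieldSelector(sel fields.Selector) string {
	if sel == nil || sel.Empty() {
		return ""
	}
	if value, found := sel.RequiresExactMatch("metadata.namespace"); found {
		return value
	}
	return ""
}

func main() {
	sel := fields.OneTermEqualSelector("metadata.namespace", "team-a")
	fmt.Println(namespaceFromFieldSelector(sel))                 // "team-a"
	fmt.Println(namespaceFromFieldSelector(fields.Everything())) // ""
}
// ---- end editorial aside ----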
type Selector struct { Label labels.Selector diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go index f69e0226..0725f550 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go @@ -8,9 +8,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// TransformFuncByObject provides access to the correct transform function for +// TransformFuncByGVK provides access to the correct transform function for // any given GVK. -type TransformFuncByObject interface { +type TransformFuncByGVK interface { Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error Get(schema.GroupVersionKind) cache.TransformFunc SetDefault(transformer cache.TransformFunc) @@ -21,9 +21,9 @@ type transformFuncByGVK struct { transformers map[schema.GroupVersionKind]cache.TransformFunc } -// TransformFuncByObjectFromMap creates a TransformFuncByObject from a map that +// TransformFuncByGVKFromMap creates a TransformFuncByGVK from a map that // maps GVKs to TransformFuncs. -func TransformFuncByObjectFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByObject { +func TransformFuncByGVKFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByGVK { byGVK := &transformFuncByGVK{} if defaultFunc, hasDefault := in[schema.GroupVersionKind{}]; hasDefault { byGVK.defaultTransform = defaultFunc diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index fccb3647..ac97beae 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -28,12 +28,9 @@ import ( "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// NewCacheFunc - Function for creating a new cache from the options and a rest config. -type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) - // a new global namespaced cache to handle cluster scoped resources. const globalCache = "_cluster-scope" @@ -43,31 +40,43 @@ const globalCache = "_cluster-scope" // a global cache for cluster scoped resource. Note that this is not intended // to be used for excluding namespaces, this is better done via a Predicate. Also note that // you may face performance issues when using this with a high number of namespaces. +// +// Deprecated: Use cache.Options.Namespaces instead. func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { return func(config *rest.Config, opts Options) (Cache, error) { - opts, err := defaultOpts(config, opts) - if err != nil { - return nil, err - } + opts.Namespaces = namespaces + return newMultiNamespaceCache(config, opts) + } +} - caches := map[string]Cache{} +func newMultiNamespaceCache(config *rest.Config, opts Options) (Cache, error) { + if len(opts.Namespaces) < 2 { + return nil, fmt.Errorf("must specify more than one namespace to use multi-namespace cache") + } + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } - // create a cache for cluster scoped resources - gCache, err := New(config, opts) + // Create every namespace cache. 
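// ---- Editorial aside (not part of the vendored patch) ----
// The deprecation note above points at cache.Options.Namespaces as the
// replacement for MultiNamespacedCacheBuilder in this controller-runtime
// release. A minimal sketch, assuming a reachable cluster/kubeconfig; the
// namespace names are placeholders.
package main

import (
	"log"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
	cfg, err := ctrl.GetConfig() // assumes a kubeconfig or in-cluster config
	if err != nil {
		log.Fatal(err)
	}
	// Watch two namespaces; cluster-scoped resources are served by the
	// additional global cache created internally.
	c, err := cache.New(cfg, cache.Options{Namespaces: []string{"team-a", "team-b"}})
	if err != nil {
		log.Fatal(err)
	}
	_ = c // typically handed to a manager / started via c.Start(ctx)
}
// ---- end editorial aside ----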
+ caches := map[string]Cache{} + for _, ns := range opts.Namespaces { + opts.Namespaces = []string{ns} + c, err := New(config, opts) if err != nil { - return nil, fmt.Errorf("error creating global cache: %w", err) + return nil, err } + caches[ns] = c + } - for _, ns := range namespaces { - opts.Namespace = ns - c, err := New(config, opts) - if err != nil { - return nil, err - } - caches[ns] = c - } - return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil + // Create a cache for cluster scoped resources. + opts.Namespaces = []string{} + gCache, err := New(config, opts) + if err != nil { + return nil, fmt.Errorf("error creating global cache: %w", err) } + + return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil } // multiNamespaceCache knows how to handle multiple namespaced caches @@ -89,7 +98,7 @@ func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object // If the object is clusterscoped, get the informer from clusterCache, // if not use the namespaced caches. - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { return nil, err } @@ -119,7 +128,7 @@ func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema // If the object is clusterscoped, get the informer from clusterCache, // if not use the namespaced caches. - isNamespaced, err := objectutil.IsAPINamespacedWithGVK(gvk, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsGVKNamespaced(gvk, c.RESTMapper) if err != nil { return nil, err } @@ -183,9 +192,9 @@ func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { } func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { - return nil //nolint:nilerr + return err } if !isNamespaced { @@ -201,7 +210,7 @@ func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, } func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { return err } @@ -223,7 +232,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) - isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(list, c.Scheme, c.RESTMapper) if err != nil { return err } @@ -293,42 +302,63 @@ type multiNamespaceInformer struct { namespaceToInformer map[string]Informer } +type handlerRegistration struct { + handles map[string]toolscache.ResourceEventHandlerRegistration +} + +type syncer interface { + HasSynced() bool +} + +// HasSynced asserts that the handler has been called for the full initial state of the informer. +// This uses syncer to be compatible between client-go 1.27+ and older versions when the interface changed. 
+func (h handlerRegistration) HasSynced() bool { + for _, reg := range h.handles { + if s, ok := reg.(syncer); ok { + if !s.HasSynced() { + return false + } + } + } + return true +} + var _ Informer = &multiNamespaceInformer{} // AddEventHandler adds the handler to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) { - handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)) + handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandler(handler) if err != nil { return nil, err } - handles[ns] = registration + handles.handles[ns] = registration } return handles, nil } // AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) { - handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)) + handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) if err != nil { return nil, err } - handles[ns] = registration + handles.handles[ns] = registration } return handles, nil } // RemoveEventHandler removes a formerly added event handler given by its registration handle. func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHandlerRegistration) error { - handles, ok := h.(map[string]toolscache.ResourceEventHandlerRegistration) + handles, ok := h.(handlerRegistration) if !ok { return fmt.Errorf("it is not the registration returned by multiNamespaceInformer") } for ns, informer := range i.namespaceToInformer { - registration, ok := handles[ns] + registration, ok := handles.handles[ns] if !ok { continue } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go index 1030013d..2b9b60d8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go @@ -19,9 +19,14 @@ package certwatcher import ( "context" "crypto/tls" + "fmt" "sync" + "time" "github.com/fsnotify/fsnotify" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) @@ -39,6 +44,9 @@ type CertWatcher struct { certPath string keyPath string + + // callback is a function to be invoked when the certificate changes. + callback func(tls.Certificate) } // New returns a new CertWatcher watching the given certificate and key. @@ -63,6 +71,17 @@ func New(certPath, keyPath string) (*CertWatcher, error) { return cw, nil } +// RegisterCallback registers a callback to be invoked when the certificate changes. 
+func (cw *CertWatcher) RegisterCallback(callback func(tls.Certificate)) { + cw.Lock() + defer cw.Unlock() + // If the current certificate is not nil, invoke the callback immediately. + if cw.currentCert != nil { + callback(*cw.currentCert) + } + cw.callback = callback +} + // GetCertificate fetches the currently loaded certificate, which may be nil. func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { cw.RLock() @@ -72,11 +91,22 @@ func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, // Start starts the watch on the certificate and key files. func (cw *CertWatcher) Start(ctx context.Context) error { - files := []string{cw.certPath, cw.keyPath} - - for _, f := range files { - if err := cw.watcher.Add(f); err != nil { - return err + files := sets.New(cw.certPath, cw.keyPath) + + { + var watchErr error + if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, func(ctx context.Context) (done bool, err error) { + for _, f := range files.UnsortedList() { + if err := cw.watcher.Add(f); err != nil { + watchErr = err + return false, nil //nolint:nilerr // We want to keep trying. + } + // We've added the watch, remove it from the set. + files.Delete(f) + } + return true, nil + }); err != nil { + return fmt.Errorf("failed to add watches: %w", kerrors.NewAggregate([]error{err, watchErr})) } } @@ -130,6 +160,14 @@ func (cw *CertWatcher) ReadCertificate() error { log.Info("Updated current TLS certificate") + // If a callback is registered, invoke it with the new certificate. + cw.RLock() + defer cw.RUnlock() + if cw.callback != nil { + go func() { + cw.callback(cert) + }() + } return nil } @@ -154,13 +192,13 @@ func (cw *CertWatcher) handleEvent(event fsnotify.Event) { } func isWrite(event fsnotify.Event) bool { - return event.Op&fsnotify.Write == fsnotify.Write + return event.Op.Has(fsnotify.Write) } func isCreate(event fsnotify.Event) bool { - return event.Op&fsnotify.Create == fsnotify.Create + return event.Op.Has(fsnotify.Create) } func isRemove(event fsnotify.Event) bool { - return event.Op&fsnotify.Remove == fsnotify.Remove + return event.Op.Has(fsnotify.Remove) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index 8e2ac48f..6a1bfb54 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -20,7 +20,9 @@ limitations under the License. package apiutil import ( + "errors" "fmt" + "net/http" "reflect" "sync" @@ -30,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" @@ -59,9 +62,13 @@ func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { // NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery // information fetched by a new client with the given config. 
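// ---- Editorial aside (not part of the vendored patch) ----
// A sketch of how the new RegisterCallback hook above might be combined with
// GetCertificate when serving TLS. The file paths are placeholders and the
// certificate/key are assumed to already exist on disk.
package main

import (
	"crypto/tls"
	"log"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
)

func main() {
	cw, err := certwatcher.New("/tmp/tls.crt", "/tmp/tls.key")
	if err != nil {
		log.Fatal(err)
	}
	cw.RegisterCallback(func(tls.Certificate) {
		log.Println("TLS certificate rotated") // e.g. trigger a reload elsewhere
	})

	tlsCfg := &tls.Config{GetCertificate: cw.GetCertificate}
	_ = tlsCfg // plug into your server's TLS configuration

	ctx := ctrl.SetupSignalHandler()
	if err := cw.Start(ctx); err != nil { // blocks, watching both files
		log.Fatal(err)
	}
}
// ---- end editorial aside ----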
-func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { +func NewDiscoveryRESTMapper(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + // Get a mapper - dc, err := discovery.NewDiscoveryClientForConfig(c) + dc, err := discovery.NewDiscoveryClientForConfigAndClient(c, httpClient) if err != nil { return nil, err } @@ -72,6 +79,36 @@ func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { return restmapper.NewDiscoveryRESTMapper(gr), nil } +// IsObjectNamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. +func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper meta.RESTMapper) (bool, error) { + gvk, err := GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsGVKNamespaced(gvk, restmapper) +} + +// IsGVKNamespaced returns true if the object having the provided +// GVK is namespace scoped. +func IsGVKNamespaced(gvk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) { + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != meta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} + // GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { // TODO(directxman12): do we want to generalize this to arbitrary container types? @@ -142,21 +179,11 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi // RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated // with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from // baseConfig, if set, otherwise a default serializer will be set. -func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { - return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs)) -} - -// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory -// in order to avoid clearing the GVK from the decoded object. -// -// See https://github.com/kubernetes/kubernetes/issues/80609. -type serializerWithDecodedGVK struct { - serializer.WithoutConversionCodecFactory -} - -// DecoderToVersion returns an decoder that does not do conversion. 
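// ---- Editorial aside (not part of the vendored patch) ----
// The apiutil helpers above now require a caller-supplied *http.Client.
// A minimal sketch of wiring one up with rest.HTTPClientFor and checking
// whether a kind is namespaced, assuming a reachable cluster/kubeconfig.
package main

import (
	"fmt"
	"log"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func main() {
	cfg, err := ctrl.GetConfig()
	if err != nil {
		log.Fatal(err)
	}
	httpClient, err := rest.HTTPClientFor(cfg) // shared client, as the error messages suggest
	if err != nil {
		log.Fatal(err)
	}
	mapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
	if err != nil {
		log.Fatal(err)
	}
	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
	namespaced, err := apiutil.IsGVKNamespaced(gvk, mapper)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(namespaced) // true for Deployments
}
// ---- end editorial aside ----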
-func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { - return serializer +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, httpClient *http.Client) (rest.Interface, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + return rest.RESTClientForConfigAndClient(createRestConfig(gvk, isUnstructured, baseConfig, codecs), httpClient) } // createRestConfig copies the base config and updates needed fields for a new rest config. @@ -183,9 +210,8 @@ func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConf } if isUnstructured { - // If the object is unstructured, we need to preserve the GVK information. - // Use our own custom serializer. - cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + // If the object is unstructured, we use the client-go dynamic serializer. + cfg = dynamic.ConfigFor(cfg) } else { cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go deleted file mode 100644 index 6b9dcf68..00000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go +++ /dev/null @@ -1,301 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apiutil - -import ( - "sync" - "sync/atomic" - - "golang.org/x/time/rate" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" -) - -// dynamicRESTMapper is a RESTMapper that dynamically discovers resource -// types at runtime. -type dynamicRESTMapper struct { - mu sync.RWMutex // protects the following fields - staticMapper meta.RESTMapper - limiter *rate.Limiter - newMapper func() (meta.RESTMapper, error) - - lazy bool - // Used for lazy init. - inited uint32 - initMtx sync.Mutex - - useLazyRestmapper bool -} - -// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. -type DynamicRESTMapperOption func(*dynamicRESTMapper) error - -// WithLimiter sets the RESTMapper's underlying limiter to lim. -func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { - return func(drm *dynamicRESTMapper) error { - drm.limiter = lim - return nil - } -} - -// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings -// until an API call is made. -var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { - drm.lazy = true - return nil -} - -// WithExperimentalLazyMapper enables experimental more advanced Lazy Restmapping mechanism. 
-var WithExperimentalLazyMapper DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { - drm.useLazyRestmapper = true - return nil -} - -// WithCustomMapper supports setting a custom RESTMapper refresher instead of -// the default method, which uses a discovery client. -// -// This exists mainly for testing, but can be useful if you need tighter control -// over how discovery is performed, which discovery endpoints are queried, etc. -func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { - return func(drm *dynamicRESTMapper) error { - drm.newMapper = newMapper - return nil - } -} - -// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic -// RESTMapper dynamically discovers resource types at runtime. opts -// configure the RESTMapper. -func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { - client, err := discovery.NewDiscoveryClientForConfig(cfg) - if err != nil { - return nil, err - } - drm := &dynamicRESTMapper{ - limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), - newMapper: func() (meta.RESTMapper, error) { - groupResources, err := restmapper.GetAPIGroupResources(client) - if err != nil { - return nil, err - } - return restmapper.NewDiscoveryRESTMapper(groupResources), nil - }, - } - for _, opt := range opts { - if err = opt(drm); err != nil { - return nil, err - } - } - if drm.useLazyRestmapper { - return newLazyRESTMapperWithClient(client) - } - if !drm.lazy { - if err := drm.setStaticMapper(); err != nil { - return nil, err - } - } - return drm, nil -} - -var ( - // defaultRefilRate is the default rate at which potential calls are - // added back to the "bucket" of allowed calls. - defaultRefillRate = 5 - // defaultLimitSize is the default starting/max number of potential calls - // per second. Once a call is used, it's added back to the bucket at a rate - // of defaultRefillRate per second. - defaultLimitSize = 5 -) - -// setStaticMapper sets drm's staticMapper by querying its client, regardless -// of reload backoff. -func (drm *dynamicRESTMapper) setStaticMapper() error { - newMapper, err := drm.newMapper() - if err != nil { - return err - } - drm.staticMapper = newMapper - return nil -} - -// init initializes drm only once if drm is lazy. -func (drm *dynamicRESTMapper) init() (err error) { - // skip init if drm is not lazy or has initialized - if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 { - return nil - } - - drm.initMtx.Lock() - defer drm.initMtx.Unlock() - if drm.inited == 0 { - if err = drm.setStaticMapper(); err == nil { - atomic.StoreUint32(&drm.inited, 1) - } - } - return err -} - -// checkAndReload attempts to call the given callback, which is assumed to be dependent -// on the data in the restmapper. -// -// If the callback returns an error matching meta.IsNoMatchErr, it will attempt to reload -// the RESTMapper's data and re-call the callback once that's occurred. -// If the callback returns any other error, the function will return immediately regardless. -// -// It will take care of ensuring that reloads are rate-limited and that extraneous calls -// aren't made. If a reload would exceed the limiters rate, it returns the error return by -// the callback. -// It's thread-safe, and worries about thread-safety for the callback (so the callback does -// not need to attempt to lock the restmapper). 
-func (drm *dynamicRESTMapper) checkAndReload(checkNeedsReload func() error) error { - // first, check the common path -- data is fresh enough - // (use an IIFE for the lock's defer) - err := func() error { - drm.mu.RLock() - defer drm.mu.RUnlock() - - return checkNeedsReload() - }() - - needsReload := meta.IsNoMatchError(err) - if !needsReload { - return err - } - - // if the data wasn't fresh, we'll need to try and update it, so grab the lock... - drm.mu.Lock() - defer drm.mu.Unlock() - - // ... and double-check that we didn't reload in the meantime - err = checkNeedsReload() - needsReload = meta.IsNoMatchError(err) - if !needsReload { - return err - } - - // we're still stale, so grab a rate-limit token if we can... - if !drm.limiter.Allow() { - // return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter) - // so that client's can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError - return err - } - - // ...reload... - if err := drm.setStaticMapper(); err != nil { - return err - } - - // ...and return the results of the closure regardless - return checkNeedsReload() -} - -// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. - -func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - if err := drm.init(); err != nil { - return schema.GroupVersionKind{}, err - } - var gvk schema.GroupVersionKind - err := drm.checkAndReload(func() error { - var err error - gvk, err = drm.staticMapper.KindFor(resource) - return err - }) - return gvk, err -} - -func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - if err := drm.init(); err != nil { - return nil, err - } - var gvks []schema.GroupVersionKind - err := drm.checkAndReload(func() error { - var err error - gvks, err = drm.staticMapper.KindsFor(resource) - return err - }) - return gvks, err -} - -func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - if err := drm.init(); err != nil { - return schema.GroupVersionResource{}, err - } - - var gvr schema.GroupVersionResource - err := drm.checkAndReload(func() error { - var err error - gvr, err = drm.staticMapper.ResourceFor(input) - return err - }) - return gvr, err -} - -func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - if err := drm.init(); err != nil { - return nil, err - } - var gvrs []schema.GroupVersionResource - err := drm.checkAndReload(func() error { - var err error - gvrs, err = drm.staticMapper.ResourcesFor(input) - return err - }) - return gvrs, err -} - -func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - if err := drm.init(); err != nil { - return nil, err - } - var mapping *meta.RESTMapping - err := drm.checkAndReload(func() error { - var err error - mapping, err = drm.staticMapper.RESTMapping(gk, versions...) - return err - }) - return mapping, err -} - -func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - if err := drm.init(); err != nil { - return nil, err - } - var mappings []*meta.RESTMapping - err := drm.checkAndReload(func() error { - var err error - mappings, err = drm.staticMapper.RESTMappings(gk, versions...) 
- return err - }) - return mappings, err -} - -func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { - if err := drm.init(); err != nil { - return "", err - } - var singular string - err := drm.checkAndReload(func() error { - var err error - singular, err = drm.staticMapper.ResourceSingularizer(resource) - return err - }) - return singular, err -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go similarity index 57% rename from vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go index e9b1e710..e0ff72dc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -18,137 +18,151 @@ package apiutil import ( "fmt" + "net/http" "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" ) -// lazyRESTMapper is a RESTMapper that will lazily query the provided +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. +func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + + client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient) + if err != nil { + return nil, err + } + return &mapper{ + mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), + client: client, + knownGroups: map[string]*restmapper.APIGroupResources{}, + apiGroups: map[string]*metav1.APIGroup{}, + }, nil +} + +// mapper is a RESTMapper that will lazily query the provided // client for discovery information to do REST mappings. -type lazyRESTMapper struct { +type mapper struct { mapper meta.RESTMapper client *discovery.DiscoveryClient knownGroups map[string]*restmapper.APIGroupResources - apiGroups []metav1.APIGroup + apiGroups map[string]*metav1.APIGroup // mutex to provide thread-safe mapper reloading. - mu sync.Mutex -} - -// newLazyRESTMapperWithClient initializes a LazyRESTMapper with a custom discovery client. -func newLazyRESTMapperWithClient(discoveryClient *discovery.DiscoveryClient) (meta.RESTMapper, error) { - return &lazyRESTMapper{ - mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), - client: discoveryClient, - knownGroups: map[string]*restmapper.APIGroupResources{}, - apiGroups: []metav1.APIGroup{}, - }, nil + mu sync.RWMutex } // KindFor implements Mapper.KindFor. 
-func (m *lazyRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - res, err := m.mapper.KindFor(resource) +func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + res, err := m.getMapper().KindFor(resource) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return schema.GroupVersionKind{}, err } - - res, err = m.mapper.KindFor(resource) + res, err = m.getMapper().KindFor(resource) } return res, err } // KindsFor implements Mapper.KindsFor. -func (m *lazyRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - res, err := m.mapper.KindsFor(resource) +func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + res, err := m.getMapper().KindsFor(resource) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return nil, err } - - res, err = m.mapper.KindsFor(resource) + res, err = m.getMapper().KindsFor(resource) } return res, err } // ResourceFor implements Mapper.ResourceFor. -func (m *lazyRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - res, err := m.mapper.ResourceFor(input) +func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourceFor(input) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return schema.GroupVersionResource{}, err } - - res, err = m.mapper.ResourceFor(input) + res, err = m.getMapper().ResourceFor(input) } return res, err } // ResourcesFor implements Mapper.ResourcesFor. -func (m *lazyRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - res, err := m.mapper.ResourcesFor(input) +func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourcesFor(input) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return nil, err } - - res, err = m.mapper.ResourcesFor(input) + res, err = m.getMapper().ResourcesFor(input) } return res, err } // RESTMapping implements Mapper.RESTMapping. -func (m *lazyRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - res, err := m.mapper.RESTMapping(gk, versions...) +func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMapping(gk, versions...) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return res, err + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err } - - res, err = m.mapper.RESTMapping(gk, versions...) + res, err = m.getMapper().RESTMapping(gk, versions...) } return res, err } // RESTMappings implements Mapper.RESTMappings. 
-func (m *lazyRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - res, err := m.mapper.RESTMappings(gk, versions...) +func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMappings(gk, versions...) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return res, err + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err } - - res, err = m.mapper.RESTMappings(gk, versions...) + res, err = m.getMapper().RESTMappings(gk, versions...) } return res, err } // ResourceSingularizer implements Mapper.ResourceSingularizer. -func (m *lazyRESTMapper) ResourceSingularizer(resource string) (string, error) { - return m.mapper.ResourceSingularizer(resource) +func (m *mapper) ResourceSingularizer(resource string) (string, error) { + return m.getMapper().ResourceSingularizer(resource) +} + +func (m *mapper) getMapper() meta.RESTMapper { + m.mu.RLock() + defer m.mu.RUnlock() + return m.mapper } // addKnownGroupAndReload reloads the mapper with updated information about missing API group. // versions can be specified for partial updates, for instance for v1beta1 version only. -func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...string) error { - m.mu.Lock() - defer m.mu.Unlock() +func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error { + // versions will here be [""] if the forwarded Version value of + // GroupVersionResource (in calling method) was not specified. + if len(versions) == 1 && versions[0] == "" { + versions = nil + } // If no specific versions are set by user, we will scan all available ones for the API group. // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls // this data will be taken from cache. if len(versions) == 0 { - apiGroup, err := m.findAPIGroupByNameLocked(groupName) + apiGroup, err := m.findAPIGroupByName(groupName) if err != nil { return err } @@ -157,6 +171,9 @@ func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...st } } + m.mu.Lock() + defer m.mu.Unlock() + // Create or fetch group resources from cache. groupResources := &restmapper.APIGroupResources{ Group: metav1.APIGroup{Name: groupName}, @@ -205,43 +222,53 @@ func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...st } m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources) - return nil } // findAPIGroupByNameLocked returns API group by its name. -func (m *lazyRESTMapper) findAPIGroupByNameLocked(groupName string) (metav1.APIGroup, error) { +func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) { // Looking in the cache first. - for _, apiGroup := range m.apiGroups { - if groupName == apiGroup.Name { - return apiGroup, nil + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil } } // Update the cache if nothing was found. 
apiGroups, err := m.client.ServerGroups() if err != nil { - return metav1.APIGroup{}, fmt.Errorf("failed to get server groups: %w", err) + return nil, fmt.Errorf("failed to get server groups: %w", err) } if len(apiGroups.Groups) == 0 { - return metav1.APIGroup{}, fmt.Errorf("received an empty API groups list") + return nil, fmt.Errorf("received an empty API groups list") } - m.apiGroups = apiGroups.Groups + m.mu.Lock() + for i := range apiGroups.Groups { + group := &apiGroups.Groups[i] + m.apiGroups[group.Name] = group + } + m.mu.Unlock() // Looking in the cache again. - for _, apiGroup := range m.apiGroups { - if groupName == apiGroup.Name { - return apiGroup, nil + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil } } // If there is still nothing, return an error. - return metav1.APIGroup{}, fmt.Errorf("failed to find API group %s", groupName) + return nil, fmt.Errorf("failed to find API group %q", groupName) } // fetchGroupVersionResources fetches the resources for the specified group and its versions. -func (m *lazyRESTMapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { +func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 7d1ed5c9..0d8b9fbe 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -20,11 +20,11 @@ import ( "context" "errors" "fmt" + "net/http" "strings" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -36,6 +36,28 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ) +// Options are creation options for a Client. +type Options struct { + // HTTPClient is the HTTP client to use for requests. + HTTPClient *http.Client + + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Cache, if provided, is used to read objects from the cache. + Cache *CacheOptions + + // WarningHandler is used to configure the warning handler responsible for + // surfacing and handling warnings messages sent by the API server. + WarningHandler WarningHandlerOptions + + // DryRun instructs the client to only perform dry run requests. + DryRun *bool +} + // WarningHandlerOptions are options for configuring a // warning handler for the client which is responsible // for surfacing API Server warnings. @@ -50,19 +72,21 @@ type WarningHandlerOptions struct { AllowDuplicateLogs bool } -// Options are creation options for a Client. 
-type Options struct { - // Scheme, if provided, will be used to map go structs to GroupVersionKinds - Scheme *runtime.Scheme - - // Mapper, if provided, will be used to map GroupVersionKinds to Resources - Mapper meta.RESTMapper - - // Opts is used to configure the warning handler responsible for - // surfacing and handling warnings messages sent by the API server. - Opts WarningHandlerOptions +// CacheOptions are options for creating a cache-backed client. +type CacheOptions struct { + // Reader is a cache-backed reader that will be used to read objects from the cache. + // +required + Reader Reader + // DisableFor is a list of objects that should not be read from the cache. + DisableFor []Object + // Unstructured is a flag that indicates whether the cache-backed client should + // read unstructured objects or lists from the cache. + Unstructured bool } +// NewClientFunc allows a user to define how to create a client. +type NewClientFunc func(config *rest.Config, options Options) (Client, error) + // New returns a new Client using the provided config and Options. // The returned client reads *and* writes directly from the server // (it doesn't use object caches). It understands how to work with @@ -73,8 +97,12 @@ type Options struct { // corresponding group, version, and kind for the given type. In the // case of unstructured types, the group, version, and kind will be extracted // from the corresponding fields on the object. -func New(config *rest.Config, options Options) (Client, error) { - return newClient(config, options) +func New(config *rest.Config, options Options) (c Client, err error) { + c, err = newClient(config, options) + if err == nil && options.DryRun != nil && *options.DryRun { + c = NewDryRunClient(c) + } + return c, err } func newClient(config *rest.Config, options Options) (*client, error) { @@ -82,22 +110,35 @@ func newClient(config *rest.Config, options Options) (*client, error) { return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") } - if !options.Opts.SuppressWarnings { + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + if !options.WarningHandler.SuppressWarnings { // surface warnings logger := log.Log.WithName("KubeAPIWarningLogger") // Set a WarningHandler, the default WarningHandler // is log.KubeAPIWarningLogger with deduplication enabled. // See log.KubeAPIWarningLoggerOptions for considerations // regarding deduplication. 
- config = rest.CopyConfig(config) config.WarningHandler = log.NewKubeAPIWarningLogger( logger, log.KubeAPIWarningLoggerOptions{ - Deduplicate: !options.Opts.AllowDuplicateLogs, + Deduplicate: !options.WarningHandler.AllowDuplicateLogs, }, ) } + // Use the rest HTTP client for the provided config if unset + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + } + // Init a scheme if none provided if options.Scheme == nil { options.Scheme = scheme.Scheme @@ -106,34 +147,35 @@ func newClient(config *rest.Config, options Options) (*client, error) { // Init a Mapper if none provided if options.Mapper == nil { var err error - options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + options.Mapper, err = apiutil.NewDynamicRESTMapper(config, options.HTTPClient) if err != nil { return nil, err } } - clientcache := &clientCache{ - config: config, - scheme: options.Scheme, - mapper: options.Mapper, - codecs: serializer.NewCodecFactory(options.Scheme), + resources := &clientRestResources{ + httpClient: options.HTTPClient, + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), } - rawMetaClient, err := metadata.NewForConfig(config) + rawMetaClient, err := metadata.NewForConfigAndClient(metadata.ConfigFor(config), options.HTTPClient) if err != nil { return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) } c := &client{ typedClient: typedClient{ - cache: clientcache, + resources: resources, paramCodec: runtime.NewParameterCodec(options.Scheme), }, unstructuredClient: unstructuredClient{ - cache: clientcache, + resources: resources, paramCodec: noConversionParamCodec{}, }, metadataClient: metadataClient{ @@ -143,20 +185,65 @@ func newClient(config *rest.Config, options Options) (*client, error) { scheme: options.Scheme, mapper: options.Mapper, } + if options.Cache == nil || options.Cache.Reader == nil { + return c, nil + } + // We want a cache if we're here. + // Set the cache. + c.cache = options.Cache.Reader + + // Load uncached GVKs. + c.cacheUnstructured = options.Cache.Unstructured + c.uncachedGVKs = map[schema.GroupVersionKind]struct{}{} + for _, obj := range options.Cache.DisableFor { + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return nil, err + } + c.uncachedGVKs[gvk] = struct{}{} + } return c, nil } var _ Client = &client{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. +// client is a client.Client that reads and writes directly from/to an API server. +// It lazily initializes new clients at the time they are used. type client struct { typedClient typedClient unstructuredClient unstructuredClient metadataClient metadataClient scheme *runtime.Scheme mapper meta.RESTMapper + + cache Reader + uncachedGVKs map[schema.GroupVersionKind]struct{} + cacheUnstructured bool +} + +func (c *client) shouldBypassCache(obj runtime.Object) (bool, error) { + if c.cache == nil { + return true, nil + } + + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return false, err + } + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. 
+ if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := c.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !c.cacheUnstructured { + _, isUnstructured := obj.(runtime.Unstructured) + return isUnstructured, nil + } + return false, nil } // resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. @@ -168,6 +255,16 @@ func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersi } } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *client) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *client) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.mapper) +} + // Scheme returns the scheme this client is using. func (c *client) Scheme() *runtime.Scheme { return c.scheme @@ -181,7 +278,7 @@ func (c *client) RESTMapper() meta.RESTMapper { // Create implements client.Client. func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Create(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot create using only metadata") @@ -194,7 +291,7 @@ func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) e func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Update(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") @@ -206,7 +303,7 @@ func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) e // Delete implements client.Client. func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Delete(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.Delete(ctx, obj, opts...) @@ -218,7 +315,7 @@ func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) e // DeleteAllOf implements client.Client. func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.DeleteAllOf(ctx, obj, opts...) @@ -231,7 +328,7 @@ func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllO func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Patch(ctx, obj, patch, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.Patch(ctx, obj, patch, opts...) 
@@ -242,8 +339,14 @@ func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...Pat // Get implements client.Client. func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + return c.cache.Get(ctx, key, obj, opts...) + } + switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Get(ctx, key, obj, opts...) case *metav1.PartialObjectMetadata: // Metadata only object should always preserve the GVK coming in from the caller. @@ -256,8 +359,14 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...Get // List implements client.Client. func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + return c.cache.List(ctx, obj, opts...) + } + switch x := obj.(type) { - case *unstructured.UnstructuredList: + case runtime.Unstructured: return c.unstructuredClient.List(ctx, obj, opts...) case *metav1.PartialObjectMetadataList: // Metadata only object should always preserve the GVK. @@ -431,7 +540,7 @@ func (po *SubResourcePatchOptions) ApplyToSubResourcePatch(o *SubResourcePatchOp func (sc *subResourceClient) Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return errors.New("can not get subresource using only metadata") @@ -446,7 +555,7 @@ func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource defer sc.client.resetGroupVersionKind(subResource, subResource.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") @@ -459,7 +568,7 @@ func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.UpdateSubResource(ctx, obj, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") @@ -472,7 +581,7 @@ func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...Sub func (sc *subResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) case *metav1.PartialObjectMetadata: return sc.client.metadataClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) 
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go similarity index 82% rename from vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go index 857a0b38..2d078795 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go @@ -17,12 +17,12 @@ limitations under the License. package client import ( + "net/http" "strings" "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -30,8 +30,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// clientCache creates and caches rest clients and metadata for Kubernetes types. -type clientCache struct { +// clientRestResources creates and stores rest clients and metadata for Kubernetes types. +type clientRestResources struct { + // httpClient is the http client to use for requests + httpClient *http.Client + // config is the rest.Config to talk to an apiserver config *rest.Config @@ -44,22 +47,22 @@ type clientCache struct { // codecs are used to create a REST client for a gvk codecs serializer.CodecFactory - // structuredResourceByType caches structured type metadata + // structuredResourceByType stores structured type metadata structuredResourceByType map[schema.GroupVersionKind]*resourceMeta - // unstructuredResourceByType caches unstructured type metadata + // unstructuredResourceByType stores unstructured type metadata unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta mu sync.RWMutex } // newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. // If the object is a list, the resource represents the item's type instead. -func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { +func (c *clientRestResources) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { if strings.HasSuffix(gvk.Kind, "List") && isList { // if this was a list, treat it as a request for the item's resource gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] } - client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs, c.httpClient) if err != nil { return nil, err } @@ -72,15 +75,13 @@ func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstruc // getResource returns the resource meta information for the given type of object. // If the object is a list, the resource represents the item's type instead. 
-func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { +func (c *clientRestResources) getResource(obj runtime.Object) (*resourceMeta, error) { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return nil, err } - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - isUnstructured = isUnstructured || isUnstructuredList + _, isUnstructured := obj.(runtime.Unstructured) // It's better to do creation work twice than to not let multiple // people make requests at once @@ -108,7 +109,7 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { } // getObjMeta returns objMeta containing both type and object metadata and state. -func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { +func (c *clientRestResources) getObjMeta(obj runtime.Object) (*objMeta, error) { r, err := c.getResource(obj) if err != nil { return nil, err @@ -120,7 +121,7 @@ func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { return &objMeta{resourceMeta: r, Object: m}, err } -// resourceMeta caches state for a Kubernetes type. +// resourceMeta stores state for a Kubernetes type. type resourceMeta struct { // client is the rest client used to talk to the apiserver rest.Interface diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go index e4e8585c..5f0a6d4b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -98,12 +98,12 @@ func GetConfigWithContext(context string) (*rest.Config, error) { if err != nil { return nil, err } - if cfg.QPS == 0.0 { cfg.QPS = 20.0 - cfg.Burst = 30.0 } - + if cfg.Burst == 0 { + cfg.Burst = 30 + } return cfg, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go index e0e28850..b2e20249 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -26,8 +26,7 @@ limitations under the License. // to the API server. // // It is a common pattern in Kubernetes to read from a cache and write to the API -// server. This pattern is covered by the DelegatingClient type, which can -// be used to have a client whose Reader is different from the Writer. +// server. This pattern is covered by the creating the Client with a Cache. // // # Options // diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go index 73b56429..bbcdd383 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // NewDryRunClient wraps an existing client and enforces DryRun mode @@ -46,6 +47,16 @@ func (c *dryRunClient) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *dryRunClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. 
+func (c *dryRunClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + // Create implements client.Client. func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { return c.client.Create(ctx, obj, append(opts, DryRunAll)...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index b642f7f8..0ddda316 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -20,6 +20,7 @@ import ( "context" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -169,6 +170,10 @@ type Client interface { Scheme() *runtime.Scheme // RESTMapper returns the rest this client is using. RESTMapper() meta.RESTMapper + // GroupVersionKindFor returns the GroupVersionKind for the given object. + GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) + // IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. + IsObjectNamespaced(obj runtime.Object) (bool, error) } // WithWatch supports Watch on top of the CRUD operations supported by diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index 00bc2175..222dc795 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" + "k8s.io/apimachinery/pkg/runtime/schema" ) // NewNamespacedClient wraps an existing client enforcing the namespace value. @@ -52,9 +52,19 @@ func (n *namespacedClient) RESTMapper() meta.RESTMapper { return n.client.RESTMapper() } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (n *namespacedClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return n.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (n *namespacedClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return n.client.IsObjectNamespaced(obj) +} + // Create implements client.Client. func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -72,7 +82,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat // Update implements client.Client. func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -90,7 +100,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat // Delete implements client.Client. 
func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -108,7 +118,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet // DeleteAllOf implements client.Client. func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -121,7 +131,7 @@ func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ... // Patch implements client.Client. func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -139,7 +149,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o // Get implements client.Client. func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -180,7 +190,7 @@ type namespacedClientSubResourceClient struct { } func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -198,7 +208,7 @@ func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subR } func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -217,7 +227,7 @@ func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, s // Update implements client.SubResourceWriter. func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -235,8 +245,7 @@ func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Ob // Patch implements client.SubResourceWriter. 
func (nsw *namespacedClientSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) - + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index 7f6f5b83..d81bf25d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -513,8 +513,15 @@ type MatchingLabels map[string]string // ApplyToList applies this configuration to the given list options. func (m MatchingLabels) ApplyToList(opts *ListOptions) { // TODO(directxman12): can we avoid reserializing this over and over? - sel := labels.SelectorFromValidatedSet(map[string]string(m)) - opts.LabelSelector = sel + if opts.LabelSelector == nil { + opts.LabelSelector = labels.NewSelector() + } + // If there's already a selector, we need to AND the two together. + noValidSel := labels.SelectorFromValidatedSet(map[string]string(m)) + reqs, _ := noValidSel.Requirements() + for _, req := range reqs { + opts.LabelSelector = opts.LabelSelector.Add(req) + } } // ApplyToDeleteAllOf applies this configuration to the given an List options. @@ -528,14 +535,17 @@ type HasLabels []string // ApplyToList applies this configuration to the given list options. func (m HasLabels) ApplyToList(opts *ListOptions) { - sel := labels.NewSelector() + if opts.LabelSelector == nil { + opts.LabelSelector = labels.NewSelector() + } + // TODO: ignore invalid labels will result in an empty selector. + // This is inconsistent to the that of MatchingLabels. for _, label := range m { r, err := labels.NewRequirement(label, selection.Exists, nil) if err == nil { - sel = sel.Add(*r) + opts.LabelSelector = opts.LabelSelector.Add(*r) } } - opts.LabelSelector = sel } // ApplyToDeleteAllOf applies this configuration to the given an List options. @@ -606,6 +616,11 @@ func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { n.ApplyToList(&opts.ListOptions) } +// AsSelector returns a selector that matches objects in the given namespace. +func (n InNamespace) AsSelector() fields.Selector { + return fields.SelectorFromSet(fields.Set{"metadata.namespace": string(n)}) +} + // Limit specifies the maximum number of results to return from the server. // Limit does not implement DeleteAllOfOption interface because the server // does not support setting it for deletecollection operations. @@ -788,6 +803,11 @@ func (forceOwnership) ApplyToPatch(opts *PatchOptions) { opts.Force = &definitelyTrue } +func (forceOwnership) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + // }}} // {{{ DeleteAllOf Options diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go deleted file mode 100644 index 19d1ab4d..00000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "context" - "strings" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client. -type NewDelegatingClientInput struct { - CacheReader Reader - Client Client - UncachedObjects []Object - CacheUnstructured bool -} - -// NewDelegatingClient creates a new delegating client. -// -// A delegating client forms a Client by composing separate reader, writer and -// statusclient interfaces. This way, you can have an Client that reads from a -// cache and writes to the API server. -func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) { - uncachedGVKs := map[schema.GroupVersionKind]struct{}{} - for _, obj := range in.UncachedObjects { - gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme()) - if err != nil { - return nil, err - } - uncachedGVKs[gvk] = struct{}{} - } - - return &delegatingClient{ - scheme: in.Client.Scheme(), - mapper: in.Client.RESTMapper(), - Reader: &delegatingReader{ - CacheReader: in.CacheReader, - ClientReader: in.Client, - scheme: in.Client.Scheme(), - uncachedGVKs: uncachedGVKs, - cacheUnstructured: in.CacheUnstructured, - }, - Writer: in.Client, - StatusClient: in.Client, - SubResourceClientConstructor: in.Client, - }, nil -} - -type delegatingClient struct { - Reader - Writer - StatusClient - SubResourceClientConstructor - - scheme *runtime.Scheme - mapper meta.RESTMapper -} - -// Scheme returns the scheme this client is using. -func (d *delegatingClient) Scheme() *runtime.Scheme { - return d.scheme -} - -// RESTMapper returns the rest mapper this client is using. -func (d *delegatingClient) RESTMapper() meta.RESTMapper { - return d.mapper -} - -// delegatingReader forms a Reader that will cause Get and List requests for -// unstructured types to use the ClientReader while requests for any other type -// of object with use the CacheReader. This avoids accidentally caching the -// entire cluster in the common case of loading arbitrary unstructured objects -// (e.g. from OwnerReferences). -type delegatingReader struct { - CacheReader Reader - ClientReader Reader - - uncachedGVKs map[schema.GroupVersionKind]struct{} - scheme *runtime.Scheme - cacheUnstructured bool -} - -func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { - gvk, err := apiutil.GVKForObject(obj, d.scheme) - if err != nil { - return false, err - } - // TODO: this is producing unsafe guesses that don't actually work, - // but it matches ~99% of the cases out there. 
- if meta.IsListType(obj) { - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - } - if _, isUncached := d.uncachedGVKs[gvk]; isUncached { - return true, nil - } - if !d.cacheUnstructured { - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - return isUnstructured || isUnstructuredList, nil - } - return false, nil -} - -// Get retrieves an obj for a given object key from the Kubernetes Cluster. -func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - if isUncached, err := d.shouldBypassCache(obj); err != nil { - return err - } else if isUncached { - return d.ClientReader.Get(ctx, key, obj, opts...) - } - return d.CacheReader.Get(ctx, key, obj, opts...) -} - -// List retrieves list of objects for a given namespace and list options. -func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { - if isUncached, err := d.shouldBypassCache(list); err != nil { - return err - } else if isUncached { - return d.ClientReader.List(ctx, list, opts...) - } - return d.CacheReader.List(ctx, list, opts...) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index ade25157..92afd9a9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -25,16 +25,14 @@ import ( var _ Reader = &typedClient{} var _ Writer = &typedClient{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. type typedClient struct { - cache *clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } // Create implements client.Client. func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -53,7 +51,7 @@ func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOpti // Update implements client.Client. func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -73,7 +71,7 @@ func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOpti // Delete implements client.Client. func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -92,7 +90,7 @@ func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOpti // DeleteAllOf implements client.Client. func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -111,7 +109,7 @@ func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...Delet // Patch implements client.Client. func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -136,7 +134,7 @@ func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts . 
// Get implements client.Client. func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - r, err := c.cache.getResource(obj) + r, err := c.resources.getResource(obj) if err != nil { return err } @@ -151,7 +149,7 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts . // List implements client.Client. func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - r, err := c.cache.getResource(obj) + r, err := c.resources.getResource(obj) if err != nil { return err } @@ -168,7 +166,7 @@ func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOpti } func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -191,7 +189,7 @@ func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Ob } func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -216,7 +214,7 @@ func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subReso // UpdateSubResource used by SubResourceWriter to write status. func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -251,7 +249,7 @@ func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subReso // PatchSubResource used by SubResourceWriter to write subresource. func (c *typedClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index 7f25c7be..0d969517 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -21,30 +21,27 @@ import ( "fmt" "strings" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" ) var _ Reader = &unstructuredClient{} var _ Writer = &unstructuredClient{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. type unstructuredClient struct { - cache *clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } // Create implements client.Client. 
func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -60,20 +57,20 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...Cr Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // Update implements client.Client. func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -90,17 +87,17 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // Delete implements client.Client. func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -119,11 +116,11 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De // DeleteAllOf implements client.Client. func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -142,11 +139,11 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts // Patch implements client.Client. func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -171,17 +168,17 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch // Get implements client.Client. 
func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() getOpts := GetOptions{} getOpts.ApplyOptions(opts) - r, err := uc.cache.getResource(obj) + r, err := uc.resources.getResource(obj) if err != nil { return err } @@ -194,22 +191,22 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // List implements client.Client. func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - u, ok := obj.(*unstructured.UnstructuredList) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - r, err := uc.cache.getResource(obj) + r, err := uc.resources.getResource(obj) if err != nil { return err } @@ -226,19 +223,19 @@ func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ... } func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", subResource) + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) } - if _, ok := subResourceObj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", obj) + if _, ok := subResourceObj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) } if subResourceObj.GetName() == "" { subResourceObj.SetName(obj.GetName()) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -257,19 +254,19 @@ func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResour } func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) } - if _, ok := subResourceObj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", obj) + if _, ok := subResourceObj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) } if subResourceObj.GetName() == "" { subResourceObj.SetName(obj.GetName()) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -289,11 +286,11 @@ func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subRes } func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { - if _, ok := 
obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -324,14 +321,14 @@ func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, } func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -359,6 +356,6 @@ func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, Do(ctx). Into(body) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go index 70490664..181b22a6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go @@ -21,9 +21,8 @@ import ( "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" ) @@ -33,21 +32,16 @@ func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { if err != nil { return nil, err } - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - return nil, err - } - return &watchingClient{client: client, dynamic: dynamicClient}, nil + return &watchingClient{client: client}, nil } type watchingClient struct { *client - dynamic dynamic.Interface } func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { switch l := list.(type) { - case *unstructured.UnstructuredList: + case runtime.Unstructured: return w.unstructuredWatch(ctx, l, opts...) case *metav1.PartialObjectMetadataList: return w.metadataWatch(ctx, l, opts...) @@ -81,25 +75,23 @@ func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialO return resInt.Watch(ctx, *listOpts.AsListOptions()) } -func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) { - gvk := obj.GroupVersionKind() - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - - r, err := w.client.unstructuredClient.cache.getResource(obj) +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj runtime.Unstructured, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.unstructuredClient.resources.getResource(obj) if err != nil { return nil, err } listOpts := w.listOpts(opts...) - if listOpts.Namespace != "" && r.isNamespaced() { - return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions()) - } - return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions()) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), w.client.unstructuredClient.paramCodec). 
+ Watch(ctx) } func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { - r, err := w.client.typedClient.cache.getResource(obj) + r, err := w.client.typedClient.resources.getResource(obj) if err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go index 905296cd..7ab76555 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go @@ -19,6 +19,7 @@ package cluster import ( "context" "errors" + "net/http" "time" "github.com/go-logr/logr" @@ -27,6 +28,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" @@ -37,14 +39,15 @@ import ( // Cluster provides various methods to interact with a cluster. type Cluster interface { - // SetFields will set any dependencies on an object for which the object has implemented the inject - // interface - e.g. inject.Client. - // Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. - SetFields(interface{}) error + // GetHTTPClient returns an HTTP client that can be used to talk to the apiserver + GetHTTPClient() *http.Client // GetConfig returns an initialized Config GetConfig() *rest.Config + // GetCache returns a cache.Cache + GetCache() cache.Cache + // GetScheme returns an initialized Scheme GetScheme() *runtime.Scheme @@ -57,9 +60,6 @@ type Cluster interface { // GetFieldIndexer returns a client.FieldIndexer configured with the client GetFieldIndexer() client.FieldIndexer - // GetCache returns a cache.Cache - GetCache() cache.Cache - // GetEventRecorderFor returns a new EventRecorder for the provided name GetEventRecorderFor(name string) record.EventRecorder @@ -83,7 +83,7 @@ type Options struct { Scheme *runtime.Scheme // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs - MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + MapperProvider func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) // Logger is the logger that should be used by this Cluster. // If none is set, it defaults to log.Log global logger. @@ -103,24 +103,54 @@ type Options struct { // Note: If a namespace is specified, controllers can still Watch for a // cluster-scoped resource (e.g Node). For namespaced resources the cache // will only hold objects from the desired namespace. + // + // Deprecated: Use Cache.Namespaces instead. Namespace string + // HTTPClient is the http client that will be used to create the default + // Cache and Client. If not set the rest.HTTPClientFor function will be used + // to create the http client. + HTTPClient *http.Client + + // Cache is the cache.Options that will be used to create the default Cache. + // By default, the cache will watch and list requested objects in all namespaces. + Cache cache.Options + // NewCache is the function that will create the cache to be used // by the manager. If not set this will use the default new cache function. + // + // When using a custom NewCache, the Cache options will be passed to the + // NewCache function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewCache if you know what you are doing. 
NewCache cache.NewCacheFunc + // Client is the client.Options that will be used to create the default Client. + // By default, the client will use the cache for reads and direct calls for writes. + Client client.Options + // NewClient is the func that creates the client to be used by the manager. - // If not set this will create the default DelegatingClient that will - // use the cache for reads and the client for writes. - // NOTE: The default client will not cache Unstructured. - NewClient NewClientFunc + // If not set this will create a Client backed by a Cache for read operations + // and a direct Client for write operations. + // + // When using a custom NewClient, the Client options will be passed to the + // NewClient function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewClient if you know what you are doing. + NewClient client.NewClientFunc // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it // for the given objects. + // + // Deprecated: Use Client.Cache.DisableFor instead. ClientDisableCacheFor []client.Object // DryRunClient specifies whether the client should be configured to enforce // dryRun mode. + // + // Deprecated: Use Client.DryRun instead. DryRunClient bool // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API @@ -137,7 +167,7 @@ type Options struct { makeBroadcaster intrec.EventBroadcasterProducer // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) } // Option can be used to manipulate Options. @@ -149,56 +179,116 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { return nil, errors.New("must specify Config") } + originalConfig := config + + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + options := Options{} for _, opt := range opts { opt(&options) } - options = setOptionsDefaults(options) + options, err := setOptionsDefaults(options, config) + if err != nil { + options.Logger.Error(err, "Failed to set defaults") + return nil, err + } // Create the mapper provider - mapper, err := options.MapperProvider(config) + mapper, err := options.MapperProvider(config, options.HTTPClient) if err != nil { options.Logger.Error(err, "Failed to get API Group-Resources") return nil, err } // Create the cache for the cached read client and registering informers - cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace}) + cacheOpts := options.Cache + { + if cacheOpts.Scheme == nil { + cacheOpts.Scheme = options.Scheme + } + if cacheOpts.Mapper == nil { + cacheOpts.Mapper = mapper + } + if cacheOpts.HTTPClient == nil { + cacheOpts.HTTPClient = options.HTTPClient + } + if cacheOpts.SyncPeriod == nil { + cacheOpts.SyncPeriod = options.SyncPeriod + } + if len(cacheOpts.Namespaces) == 0 && options.Namespace != "" { + cacheOpts.Namespaces = []string{options.Namespace} + } + } + cache, err := options.NewCache(config, cacheOpts) if err != nil { return nil, err } - clientOptions := client.Options{Scheme: options.Scheme, Mapper: mapper} + // Create the client, and default its options. 
+ clientOpts := options.Client + { + if clientOpts.Scheme == nil { + clientOpts.Scheme = options.Scheme + } + if clientOpts.Mapper == nil { + clientOpts.Mapper = mapper + } + if clientOpts.HTTPClient == nil { + clientOpts.HTTPClient = options.HTTPClient + } + if clientOpts.Cache == nil { + clientOpts.Cache = &client.CacheOptions{ + Unstructured: false, + } + } + if clientOpts.Cache.Reader == nil { + clientOpts.Cache.Reader = cache + } + + // For backward compatibility, the ClientDisableCacheFor option should + // be appended to the DisableFor option in the client. + clientOpts.Cache.DisableFor = append(clientOpts.Cache.DisableFor, options.ClientDisableCacheFor...) - apiReader, err := client.New(config, clientOptions) + if clientOpts.DryRun == nil && options.DryRunClient { + // For backward compatibility, the DryRunClient (if set) option should override + // the DryRun option in the client (if unset). + clientOpts.DryRun = pointer.Bool(true) + } + } + clientWriter, err := options.NewClient(config, clientOpts) if err != nil { return nil, err } - writeObj, err := options.NewClient(cache, config, clientOptions, options.ClientDisableCacheFor...) + // Create the API Reader, a client with no cache. + clientReader, err := client.New(config, client.Options{ + HTTPClient: options.HTTPClient, + Scheme: options.Scheme, + Mapper: mapper, + }) if err != nil { return nil, err } - if options.DryRunClient { - writeObj = client.NewDryRunClient(writeObj) - } - // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. - recorderProvider, err := options.newRecorderProvider(config, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) + recorderProvider, err := options.newRecorderProvider(config, options.HTTPClient, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } return &cluster{ - config: config, + config: originalConfig, + httpClient: options.HTTPClient, scheme: options.Scheme, cache: cache, fieldIndexes: cache, - client: writeObj, - apiReader: apiReader, + client: clientWriter, + apiReader: clientReader, recorderProvider: recorderProvider, mapper: mapper, logger: options.Logger, @@ -206,21 +296,27 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { } // setOptionsDefaults set default values for Options fields. 
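// Illustrative sketch (not part of the vendored patch): how the reworked
// cluster.Options above can be consumed. The namespace value and the
// corev1.Node type are placeholders; the deprecated Namespace,
// ClientDisableCacheFor and DryRunClient fields map onto the new Cache and
// Client options roughly as follows.
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/cluster"
)

func newCluster() (cluster.Cluster, error) {
	return cluster.New(ctrl.GetConfigOrDie(), func(o *cluster.Options) {
		// Was: o.Namespace = "team-a"
		o.Cache = cache.Options{Namespaces: []string{"team-a"}}
		// Was: o.ClientDisableCacheFor = []client.Object{&corev1.Node{}} and o.DryRunClient = true
		o.Client = client.Options{
			Cache:  &client.CacheOptions{DisableFor: []client.Object{&corev1.Node{}}},
			DryRun: pointer.Bool(true),
		}
	})
}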
-func setOptionsDefaults(options Options) Options { +func setOptionsDefaults(options Options, config *rest.Config) (Options, error) { + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return options, err + } + } + // Use the Kubernetes client-go scheme if none is specified if options.Scheme == nil { options.Scheme = scheme.Scheme } if options.MapperProvider == nil { - options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) { - return apiutil.NewDynamicRESTMapper(c) - } + options.MapperProvider = apiutil.NewDynamicRESTMapper } // Allow users to define how to create a new client if options.NewClient == nil { - options.NewClient = DefaultNewClient + options.NewClient = client.New } // Allow newCache to be mocked @@ -250,39 +346,5 @@ func setOptionsDefaults(options Options) Options { options.Logger = logf.RuntimeLog.WithName("cluster") } - return options -} - -// NewClientFunc allows a user to define how to create a client. -type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) - -// ClientOptions are the optional arguments for tuning the caching client. -type ClientOptions struct { - UncachedObjects []client.Object - CacheUnstructured bool -} - -// DefaultNewClient creates the default caching client, that will never cache Unstructured. -func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { - return ClientBuilderWithOptions(ClientOptions{})(cache, config, options, uncachedObjects...) -} - -// ClientBuilderWithOptions returns a Client constructor that will build a client -// honoring the options argument -func ClientBuilderWithOptions(options ClientOptions) NewClientFunc { - return func(cache cache.Cache, config *rest.Config, clientOpts client.Options, uncachedObjects ...client.Object) (client.Client, error) { - options.UncachedObjects = append(options.UncachedObjects, uncachedObjects...) - - c, err := client.New(config, clientOpts) - if err != nil { - return nil, err - } - - return client.NewDelegatingClient(client.NewDelegatingClientInput{ - CacheReader: cache, - Client: c, - UncachedObjects: options.UncachedObjects, - CacheUnstructured: options.CacheUnstructured, - }) - } + return options, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go index 125e1d14..27427642 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go @@ -18,6 +18,7 @@ package cluster import ( "context" + "net/http" "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/meta" @@ -28,22 +29,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) type cluster struct { // config is the rest.config used to talk to the apiserver. Required. config *rest.Config - // scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults - // to scheme.scheme. - scheme *runtime.Scheme - - cache cache.Cache - - // TODO(directxman12): Provide an escape hatch to get individual indexers - // client is the client injected into Controllers (and EventHandlers, Sources and Predicates). 
- client client.Client + httpClient *http.Client + scheme *runtime.Scheme + cache cache.Cache + client client.Client // apiReader is the reader that will make requests to the api server and not the cache. apiReader client.Reader @@ -64,32 +59,14 @@ type cluster struct { logger logr.Logger } -func (c *cluster) SetFields(i interface{}) error { - if _, err := inject.ConfigInto(c.config, i); err != nil { - return err - } - if _, err := inject.ClientInto(c.client, i); err != nil { - return err - } - if _, err := inject.APIReaderInto(c.apiReader, i); err != nil { - return err - } - if _, err := inject.SchemeInto(c.scheme, i); err != nil { - return err - } - if _, err := inject.CacheInto(c.cache, i); err != nil { - return err - } - if _, err := inject.MapperInto(c.mapper, i); err != nil { - return err - } - return nil -} - func (c *cluster) GetConfig() *rest.Config { return c.config } +func (c *cluster) GetHTTPClient() *http.Client { + return c.httpClient +} + func (c *cluster) GetClient() client.Client { return c.client } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go index 8e853d6a..9c7b875a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go @@ -29,6 +29,8 @@ import ( // ControllerManagerConfiguration defines the functions necessary to parse a config file // and to configure the Options struct for the ctrl.Manager. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfiguration interface { runtime.Object @@ -38,6 +40,8 @@ type ControllerManagerConfiguration interface { // DeferredFileLoader is used to configure the decoder for loading controller // runtime component config types. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type DeferredFileLoader struct { ControllerManagerConfiguration path string @@ -52,6 +56,8 @@ type DeferredFileLoader struct { // Defaults: // * Path: "./config.yaml" // * Kind: GenericControllerManagerConfiguration +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. func File() *DeferredFileLoader { scheme := runtime.NewScheme() utilruntime.Must(v1alpha1.AddToScheme(scheme)) @@ -83,12 +89,6 @@ func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *Deferre return d } -// InjectScheme will configure the scheme to be used for decoding the file. -func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error { - d.scheme = scheme - return nil -} - // loadFile is used from the mutex.Once to load the file. 
func (d *DeferredFileLoader) loadFile() { if d.scheme == nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go new file mode 100644 index 00000000..b37dffae --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import "time" + +// Controller contains configuration options for a controller. +type Controller struct { + // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation + // allowed for that controller. + // + // When a controller is registered within this manager using the builder utilities, + // users have to specify the type the controller reconciles in the For(...) call. + // If the object's kind passed matches one of the keys in this map, the concurrency + // for that controller is set to the number specified. + // + // The key is expected to be consistent in form with GroupKind.String(), + // e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`. + GroupKindConcurrency map[string]int + + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to the Controller.RecoverPanic setting from the Manager if unset. + RecoverPanic *bool + + // NeedLeaderElection indicates whether the controller needs to use leader election. + // Defaults to true, which means the controller will use leader election. + NeedLeaderElection *bool +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go index a169ec55..47a5a2f1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go @@ -14,12 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package config contains functionality for interacting with ComponentConfig -// files -// -// # DeferredFileLoader -// -// This uses a deferred file decoding allowing you to chain your configuration -// setup. You can pass this into manager.Options#File and it will load your -// config. +// Package config contains functionality for interacting with +// configuration for controller-runtime components. 
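// Illustrative sketch (not part of the vendored patch): wiring the new
// config.Controller struct above through the manager, assuming manager.Options
// exposes it as the Controller field (as the deprecation notes in this patch
// indicate). The concrete values are placeholders.
package main

import (
	"time"

	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func newManager() (manager.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), manager.Options{
		Controller: config.Controller{
			// Key format follows GroupKind.String(), e.g. "ReplicaSet.apps".
			GroupKindConcurrency:    map[string]int{"ReplicaSet.apps": 5},
			MaxConcurrentReconciles: 2,
			CacheSyncTimeout:        2 * time.Minute,
			RecoverPanic:            pointer.Bool(true),
		},
	})
}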
package config diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go index 1e3adbaf..8fdf14d3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go @@ -17,4 +17,6 @@ limitations under the License. // Package v1alpha1 provides the ControllerManagerConfiguration used for // configuring ctrl.Manager // +kubebuilder:object:generate=true +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. package v1alpha1 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go index 9efdbc06..ca854bcf 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go @@ -23,12 +23,18 @@ import ( var ( // GroupVersion is group version used to register these objects. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go index f2226278..52c8ab30 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go @@ -25,6 +25,8 @@ import ( ) // ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfigurationSpec struct { // SyncPeriod determines the minimum frequency at which watched resources are // reconciled. 
A lower period will correct entropy more quickly, but reduce @@ -60,7 +62,7 @@ type ControllerManagerConfigurationSpec struct { // +optional Controller *ControllerConfigurationSpec `json:"controller,omitempty"` - // Metrics contains thw controller metrics configuration + // Metrics contains the controller metrics configuration // +optional Metrics ControllerMetrics `json:"metrics,omitempty"` @@ -75,6 +77,11 @@ type ControllerManagerConfigurationSpec struct { // ControllerConfigurationSpec defines the global configuration for // controllers registered with the manager. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. +// +// Deprecated: Controller global configuration can now be set at the manager level, +// using the manager.Options.Controller field. type ControllerConfigurationSpec struct { // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation // allowed for that controller. @@ -101,6 +108,8 @@ type ControllerConfigurationSpec struct { } // ControllerMetrics defines the metrics configs. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerMetrics struct { // BindAddress is the TCP address that the controller should bind to // for serving prometheus metrics. @@ -110,6 +119,8 @@ type ControllerMetrics struct { } // ControllerHealth defines the health configs. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerHealth struct { // HealthProbeBindAddress is the TCP address that the controller should bind to // for serving health probes @@ -127,6 +138,8 @@ type ControllerHealth struct { } // ControllerWebhook defines the webhook server for the controller. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerWebhook struct { // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port. @@ -149,6 +162,8 @@ type ControllerWebhook struct { // +kubebuilder:object:root=true // ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfiguration struct { metav1.TypeMeta `json:",inline"` @@ -157,6 +172,8 @@ type ControllerManagerConfiguration struct { } // Complete returns the configuration for controller-runtime. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. 
Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) { return *c, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index fe7f94fd..6732b6f7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -39,6 +39,18 @@ type Options struct { // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. MaxConcurrentReconciles int + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to the Controller.RecoverPanic setting from the Manager if unset. + RecoverPanic *bool + + // NeedLeaderElection indicates whether the controller needs to use leader election. + // Defaults to true, which means the controller will use leader election. + NeedLeaderElection *bool + // Reconciler reconciles an object Reconciler reconcile.Reconciler @@ -50,14 +62,6 @@ type Options struct { // LogConstructor is used to construct a logger used for this controller and passed // to each reconciliation via the context field. LogConstructor func(request *reconcile.Request) logr.Logger - - // CacheSyncTimeout refers to the time limit set to wait for syncing caches. - // Defaults to 2 minutes if not set. - CacheSyncTimeout time.Duration - - // RecoverPanic indicates whether the panic caused by reconcile should be recovered. - // Defaults to the Controller.RecoverPanic setting from the Manager if unset. - RecoverPanic *bool } // Controller implements a Kubernetes API. 
A Controller manages a work queue fed reconcile.Requests @@ -124,26 +128,33 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller } if options.MaxConcurrentReconciles <= 0 { - options.MaxConcurrentReconciles = 1 + if mgr.GetControllerOptions().MaxConcurrentReconciles > 0 { + options.MaxConcurrentReconciles = mgr.GetControllerOptions().MaxConcurrentReconciles + } else { + options.MaxConcurrentReconciles = 1 + } } if options.CacheSyncTimeout == 0 { - options.CacheSyncTimeout = 2 * time.Minute + if mgr.GetControllerOptions().CacheSyncTimeout != 0 { + options.CacheSyncTimeout = mgr.GetControllerOptions().CacheSyncTimeout + } else { + options.CacheSyncTimeout = 2 * time.Minute + } } if options.RateLimiter == nil { options.RateLimiter = workqueue.DefaultControllerRateLimiter() } - // Inject dependencies into Reconciler - if err := mgr.SetFields(options.Reconciler); err != nil { - return nil, err - } - if options.RecoverPanic == nil { options.RecoverPanic = mgr.GetControllerOptions().RecoverPanic } + if options.NeedLeaderElection == nil { + options.NeedLeaderElection = mgr.GetControllerOptions().NeedLeaderElection + } + // Create controller with dependencies set return &controller.Controller{ Do: options.Reconciler, @@ -152,10 +163,10 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller }, MaxConcurrentReconciles: options.MaxConcurrentReconciles, CacheSyncTimeout: options.CacheSyncTimeout, - SetFields: mgr.SetFields, Name: name, LogConstructor: options.LogConstructor, RecoverPanic: options.RecoverPanic, + LeaderElected: options.NeedLeaderElection, }, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go index dc38b793..f9c58ea2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go @@ -166,7 +166,7 @@ func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefi // Poll until all resources are found in discovery p := &poller{config: config, waitingFor: waitingFor} - return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll) + return wait.PollUntilContextTimeout(context.TODO(), options.PollInterval, options.MaxTime, true, p.poll) } // poller checks if all the resources have been found in discovery, and returns false if not. @@ -179,7 +179,7 @@ type poller struct { } // poll checks if all the resources have been found in discovery, and returns false if not. 
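// Illustrative sketch (not part of the vendored patch): the per-controller
// options added above; fields left unset now fall back to the manager-level
// settings resolved in NewUnmanaged. The controller name and the reconciler
// argument are placeholders.
package main

import (
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func newController(mgr manager.Manager, reconciler reconcile.Reconciler) (controller.Controller, error) {
	return controller.New("example-controller", mgr, controller.Options{
		Reconciler:              reconciler,
		MaxConcurrentReconciles: 2,
		// Run this controller on every replica instead of only the elected leader.
		NeedLeaderElection: pointer.Bool(false),
	})
}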
-func (p *poller) poll() (done bool, err error) { +func (p *poller) poll(ctx context.Context) (done bool, err error) { // Create a new clientset to avoid any client caching of discovery cs, err := clientset.NewForConfig(p.config) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go index f9e0bb8a..4ee440df 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go @@ -230,17 +230,19 @@ func (te *Environment) Start() (*rest.Config, error) { if os.Getenv(envAttachOutput) == "true" { te.AttachControlPlaneOutput = true } - if apiServer.Out == nil && te.AttachControlPlaneOutput { - apiServer.Out = os.Stdout - } - if apiServer.Err == nil && te.AttachControlPlaneOutput { - apiServer.Err = os.Stderr - } - if te.ControlPlane.Etcd.Out == nil && te.AttachControlPlaneOutput { - te.ControlPlane.Etcd.Out = os.Stdout - } - if te.ControlPlane.Etcd.Err == nil && te.AttachControlPlaneOutput { - te.ControlPlane.Etcd.Err = os.Stderr + if te.AttachControlPlaneOutput { + if apiServer.Out == nil { + apiServer.Out = os.Stdout + } + if apiServer.Err == nil { + apiServer.Err = os.Stderr + } + if te.ControlPlane.Etcd.Out == nil { + te.ControlPlane.Etcd.Out = os.Stdout + } + if te.ControlPlane.Etcd.Err == nil { + te.ControlPlane.Etcd.Err = os.Stderr + } } apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go index 49d87735..f7e43a14 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go @@ -147,6 +147,8 @@ func (o *WebhookInstallOptions) PrepWithoutInstalling() error { // Install installs specified webhooks to the API server. func (o *WebhookInstallOptions) Install(config *rest.Config) error { + defaultWebhookOptions(o) + if len(o.LocalServingCAData) == 0 { if err := o.PrepWithoutInstalling(); err != nil { return err @@ -168,6 +170,16 @@ func (o *WebhookInstallOptions) Cleanup() error { return nil } +// defaultWebhookOptions sets the default values for Webhooks. +func defaultWebhookOptions(o *WebhookInstallOptions) { + if o.MaxTime == 0 { + o.MaxTime = defaultMaxWait + } + if o.PollInterval == 0 { + o.PollInterval = defaultPollInterval + } +} + // WaitForWebhooks waits for the Webhooks to be available through API server. func WaitForWebhooks(config *rest.Config, mutatingWebhooks []*admissionv1.MutatingWebhookConfiguration, @@ -203,7 +215,7 @@ func WaitForWebhooks(config *rest.Config, // Poll until all resources are found in discovery p := &webhookPoller{config: config, waitingFor: waitingFor} - return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll) + return wait.PollUntilContextTimeout(context.TODO(), options.PollInterval, options.MaxTime, true, p.poll) } // poller checks if all the resources have been found in discovery, and returns false if not. @@ -216,7 +228,7 @@ type webhookPoller struct { } // poll checks if all the resources have been found in discovery, and returns false if not. 
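// Illustrative sketch (not part of the vendored patch): the polling helper the
// two envtest pollers above switch to. Unlike wait.PollImmediate, the
// condition receives a context and the wait is bounded by both the timeout and
// the caller's context; the condition body here is a placeholder.
package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForReady(ctx context.Context, ready func(context.Context) (bool, error)) error {
	// Poll immediately, then every second, for at most one minute.
	return wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, ready)
}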
-func (p *webhookPoller) poll() (done bool, err error) { +func (p *webhookPoller) poll(ctx context.Context) (done bool, err error) { // Create a new clientset to avoid any client caching of discovery c, err := client.New(p.config, client.Options{}) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go index e6d3a4ea..c72b2e1e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -17,6 +17,8 @@ limitations under the License. package handler import ( + "context" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" @@ -36,7 +38,7 @@ var _ EventHandler = &EnqueueRequestForObject{} type EnqueueRequestForObject struct{} // Create implements EventHandler. -func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) return @@ -48,7 +50,7 @@ func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.Rate } // Update implements EventHandler. -func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { switch { case evt.ObjectNew != nil: q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ @@ -66,7 +68,7 @@ func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.Rate } // Delete implements EventHandler. -func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) return @@ -78,7 +80,7 @@ func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.Rate } // Generic implements EventHandler. -func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) return diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go index 17401b1f..b55fdde6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -17,16 +17,17 @@ limitations under the License. package handler import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) // MapFunc is the signature required for enqueueing requests from a generic function. // This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler. 
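// Illustrative sketch (not part of the vendored patch): the context-aware
// MapFunc signature introduced immediately below. The mapping logic is a
// placeholder that simply mirrors the observed object into a request.
package main

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

var mapToOwnRequest handler.MapFunc = func(ctx context.Context, obj client.Object) []reconcile.Request {
	return []reconcile.Request{{
		NamespacedName: types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()},
	}}
}

// Registered as before via handler.EnqueueRequestsFromMapFunc(mapToOwnRequest).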
-type MapFunc func(client.Object) []reconcile.Request +type MapFunc func(context.Context, client.Object) []reconcile.Request // EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection // of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects @@ -52,32 +53,32 @@ type enqueueRequestsFromMapFunc struct { } // Create implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } // Update implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.ObjectOld, reqs) - e.mapAndEnqueue(q, evt.ObjectNew, reqs) + e.mapAndEnqueue(ctx, q, evt.ObjectOld, reqs) + e.mapAndEnqueue(ctx, q, evt.ObjectNew, reqs) } // Delete implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } // Generic implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } -func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { - for _, req := range e.toRequests(object) { +func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { + for _, req := range e.toRequests(ctx, object) { _, ok := reqs[req] if !ok { q.Add(req) @@ -85,13 +86,3 @@ func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInter } } } - -// EnqueueRequestsFromMapFunc can inject fields into the mapper. - -// InjectFunc implements inject.Injector. -func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { - if f == nil { - return nil - } - return f(e.toRequests) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go index 63699893..02e7d756 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -17,6 +17,7 @@ limitations under the License. 
package handler import ( + "context" "fmt" "k8s.io/apimachinery/pkg/api/meta" @@ -25,15 +26,18 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) -var _ EventHandler = &EnqueueRequestForOwner{} +var _ EventHandler = &enqueueRequestForOwner{} -var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOwner") +var log = logf.RuntimeLog.WithName("eventhandler").WithName("enqueueRequestForOwner") + +// OwnerOption modifies an EnqueueRequestForOwner EventHandler. +type OwnerOption func(e *enqueueRequestForOwner) // EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created // the object that was the source of the Event. @@ -42,13 +46,34 @@ var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOw // // - a source.Kind Source with Type of Pod. // -// - a handler.EnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and IsController set to true. -type EnqueueRequestForOwner struct { - // OwnerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. - OwnerType runtime.Object +// - a handler.enqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and OnlyControllerOwner set to true. +func EnqueueRequestForOwner(scheme *runtime.Scheme, mapper meta.RESTMapper, ownerType client.Object, opts ...OwnerOption) EventHandler { + e := &enqueueRequestForOwner{ + ownerType: ownerType, + mapper: mapper, + } + if err := e.parseOwnerTypeGroupKind(scheme); err != nil { + panic(err) + } + for _, opt := range opts { + opt(e) + } + return e +} + +// OnlyControllerOwner if provided will only look at the first OwnerReference with Controller: true. +func OnlyControllerOwner() OwnerOption { + return func(e *enqueueRequestForOwner) { + e.isController = true + } +} - // IsController if set will only look at the first OwnerReference with Controller: true. - IsController bool +type enqueueRequestForOwner struct { + // ownerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. + ownerType runtime.Object + + // isController if set will only look at the first OwnerReference with Controller: true. + isController bool // groupKind is the cached Group and Kind from OwnerType groupKind schema.GroupKind @@ -58,7 +83,7 @@ type EnqueueRequestForOwner struct { } // Create implements EventHandler. -func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -67,7 +92,7 @@ func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateL } // Update implements EventHandler. 
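// Illustrative sketch (not part of the vendored patch): the constructor-style
// API above, which replaces the removed exported struct. appsv1.ReplicaSet is
// a placeholder owner type and mgr is assumed to be an existing manager.Manager.
package main

import (
	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func ownerHandler(mgr manager.Manager) handler.EventHandler {
	// Was: &handler.EnqueueRequestForOwner{OwnerType: &appsv1.ReplicaSet{}, IsController: true}
	return handler.EnqueueRequestForOwner(
		mgr.GetScheme(),
		mgr.GetRESTMapper(),
		&appsv1.ReplicaSet{},
		handler.OnlyControllerOwner(),
	)
}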
-func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.ObjectOld, reqs) e.getOwnerReconcileRequest(evt.ObjectNew, reqs) @@ -77,7 +102,7 @@ func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateL } // Delete implements EventHandler. -func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -86,7 +111,7 @@ func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateL } // Generic implements EventHandler. -func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -96,17 +121,17 @@ func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.Rat // parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false // if the OwnerType could not be parsed using the scheme. -func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { +func (e *enqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { // Get the kinds of the type - kinds, _, err := scheme.ObjectKinds(e.OwnerType) + kinds, _, err := scheme.ObjectKinds(e.ownerType) if err != nil { - log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType)) + log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.ownerType)) return err } // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. if len(kinds) != 1 { - err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) - log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) + err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.ownerType, kinds) + log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.ownerType), "kinds", kinds) return err } // Cache the Group and Kind for the OwnerType @@ -116,7 +141,7 @@ func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) // getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile // owners of object that match e.OwnerType. 
-func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { +func (e *enqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { // Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested // by the user for _, ref := range e.getOwnersReferences(object) { @@ -138,7 +163,7 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, Name: ref.Name, }} - // if owner is not namespaced then we should set the namespace to the empty + // if owner is not namespaced then we should not set the namespace mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version) if err != nil { log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind) @@ -153,16 +178,16 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, } } -// getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner +// getOwnersReferences returns the OwnerReferences for an object as specified by the enqueueRequestForOwner // - if IsController is true: only take the Controller OwnerReference (if found) // - if IsController is false: take all OwnerReferences. -func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { +func (e *enqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { if object == nil { return nil } // If not filtered as Controller only, then use all the OwnerReferences - if !e.IsController { + if !e.isController { return object.GetOwnerReferences() } // If filtered to a Controller, only take the Controller OwnerReference @@ -172,18 +197,3 @@ func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []met // No Controller OwnerReference found return nil } - -var _ inject.Scheme = &EnqueueRequestForOwner{} - -// InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForOwner. -func (e *EnqueueRequestForOwner) InjectScheme(s *runtime.Scheme) error { - return e.parseOwnerTypeGroupKind(s) -} - -var _ inject.Mapper = &EnqueueRequestForOwner{} - -// InjectMapper is called by the Controller to provide the rest mapper used by the manager. -func (e *EnqueueRequestForOwner) InjectMapper(m meta.RESTMapper) error { - e.mapper = m - return nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go index 8652d22d..2f380f4f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -17,6 +17,8 @@ limitations under the License. package handler import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" ) @@ -41,17 +43,17 @@ import ( // Most users shouldn't need to implement their own EventHandler. type EventHandler interface { // Create is called in response to an create event - e.g. Pod Creation. - Create(event.CreateEvent, workqueue.RateLimitingInterface) + Create(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) // Update is called in response to an update event - e.g. Pod Updated. - Update(event.UpdateEvent, workqueue.RateLimitingInterface) + Update(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) // Delete is called in response to a delete event - e.g. Pod Deleted. 
- Delete(event.DeleteEvent, workqueue.RateLimitingInterface) + Delete(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) // Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or // external trigger request - e.g. reconcile Autoscaling, or a Webhook. - Generic(event.GenericEvent, workqueue.RateLimitingInterface) + Generic(context.Context, event.GenericEvent, workqueue.RateLimitingInterface) } var _ EventHandler = Funcs{} @@ -60,45 +62,45 @@ var _ EventHandler = Funcs{} type Funcs struct { // Create is called in response to an add event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - CreateFunc func(event.CreateEvent, workqueue.RateLimitingInterface) + CreateFunc func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) // Update is called in response to an update event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - UpdateFunc func(event.UpdateEvent, workqueue.RateLimitingInterface) + UpdateFunc func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) // Delete is called in response to a delete event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - DeleteFunc func(event.DeleteEvent, workqueue.RateLimitingInterface) + DeleteFunc func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) // GenericFunc is called in response to a generic event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface) + GenericFunc func(context.Context, event.GenericEvent, workqueue.RateLimitingInterface) } // Create implements EventHandler. -func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Create(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { if h.CreateFunc != nil { - h.CreateFunc(e, q) + h.CreateFunc(ctx, e, q) } } // Delete implements EventHandler. -func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { if h.DeleteFunc != nil { - h.DeleteFunc(e, q) + h.DeleteFunc(ctx, e, q) } } // Update implements EventHandler. -func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { if h.UpdateFunc != nil { - h.UpdateFunc(e, q) + h.UpdateFunc(ctx, e, q) } } // Generic implements EventHandler. 
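// Illustrative sketch (not part of the vendored patch): handler.Funcs with the
// context-aware callbacks defined above; the enqueue logic is a placeholder.
package main

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

var onCreateOnly = handler.Funcs{
	CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
			Namespace: e.Object.GetNamespace(),
			Name:      e.Object.GetName(),
		}})
	},
}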
-func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Generic(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { if h.GenericFunc != nil { - h.GenericFunc(e, q) + h.GenericFunc(ctx, e, q) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index f7734695..83aba28c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -33,12 +33,9 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/source" ) -var _ inject.Injector = &Controller{} - // Controller implements controller.Controller. type Controller struct { // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. @@ -61,10 +58,6 @@ type Controller struct { // the Queue for processing Queue workqueue.RateLimitingInterface - // SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates - // Deprecated: the caller should handle injected fields itself. - SetFields func(i interface{}) error - // mu is used to synchronize Controller setup mu sync.Mutex @@ -93,6 +86,9 @@ type Controller struct { // RecoverPanic indicates whether the panic caused by reconcile should be recovered. RecoverPanic *bool + + // LeaderElected indicates whether the controller is leader elected or always running. + LeaderElected *bool } // watchDescription contains all the information necessary to start a watch. @@ -127,19 +123,6 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc c.mu.Lock() defer c.mu.Unlock() - // Inject Cache into arguments - if err := c.SetFields(src); err != nil { - return err - } - if err := c.SetFields(evthdler); err != nil { - return err - } - for _, pr := range prct { - if err := c.SetFields(pr); err != nil { - return err - } - } - // Controller hasn't started yet, store the watches locally and return. // // These watches are going to be held on the controller struct until the manager or user calls Start(...). @@ -152,6 +135,14 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc return src.Start(c.ctx, evthdler, c.Queue, prct...) } +// NeedLeaderElection implements the manager.LeaderElectionRunnable interface. +func (c *Controller) NeedLeaderElection() bool { + if c.LeaderElected == nil { + return true + } + return *c.LeaderElected +} + // Start implements controller.Controller. 
func (c *Controller) Start(ctx context.Context) error { // use an IIFE to get proper lock handling @@ -323,7 +314,11 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { result, err := c.Reconcile(ctx, req) switch { case err != nil: - c.Queue.AddRateLimited(req) + if errors.Is(err, reconcile.TerminalError(nil)) { + ctrlmetrics.TerminalReconcileErrors.WithLabelValues(c.Name).Inc() + } else { + c.Queue.AddRateLimited(req) + } ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc() log.Error(err, "Reconciler error") @@ -351,12 +346,6 @@ func (c *Controller) GetLogger() logr.Logger { return c.LogConstructor(nil) } -// InjectFunc implement SetFields.Injector. -func (c *Controller) InjectFunc(f inject.Func) error { - c.SetFields = f - return nil -} - // updateMetrics updates prometheus metrics within the controller. func (c *Controller) updateMetrics(reconcileTime time.Duration) { ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go index baec6692..b74ce062 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go @@ -39,6 +39,13 @@ var ( Help: "Total number of reconciliation errors per controller", }, []string{"controller"}) + // TerminalReconcileErrors is a prometheus counter metrics which holds the total + // number of terminal errors from the Reconciler. + TerminalReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_terminal_reconcile_errors_total", + Help: "Total number of terminal reconciliation errors per controller", + }, []string{"controller"}) + // ReconcileTime is a prometheus metric which keeps track of the duration // of reconciliations. ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ @@ -67,6 +74,7 @@ func init() { metrics.Registry.MustRegister( ReconcileTotal, ReconcileErrors, + TerminalReconcileErrors, ReconcileTime, WorkerCount, ActiveWorkers, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go deleted file mode 100644 index 7057f3db..00000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package objectutil - -import ( - "errors" - "fmt" - - apimeta "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// FilterWithLabels returns a copy of the items in objs matching labelSel. 
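// Illustrative sketch (not part of the vendored patch): the terminal-error
// path added to reconcileHandler above. Wrapping an error with
// reconcile.TerminalError (assumed here to mark it as non-retryable) means the
// controller counts it in the new controller_runtime_terminal_reconcile_errors_total
// metric instead of requeuing with rate limiting. The validation helper is a placeholder.
package main

import (
	"context"
	"fmt"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

type exampleReconciler struct{}

func (r *exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	if err := validate(req); err != nil {
		// Do not requeue: retrying will never succeed for this object.
		return ctrl.Result{}, reconcile.TerminalError(fmt.Errorf("invalid object %s: %w", req.NamespacedName, err))
	}
	return ctrl.Result{}, nil
}

// validate is a placeholder for domain-specific checks.
func validate(req ctrl.Request) error { return nil }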
-func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { - outItems := make([]runtime.Object, 0, len(objs)) - for _, obj := range objs { - meta, err := apimeta.Accessor(obj) - if err != nil { - return nil, err - } - if labelSel != nil { - lbls := labels.Set(meta.GetLabels()) - if !labelSel.Matches(lbls) { - continue - } - } - outItems = append(outItems, obj.DeepCopyObject()) - } - return outItems, nil -} - -// IsAPINamespaced returns true if the object is namespace scoped. -// For unstructured objects the gvk is found from the object itself. -func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return false, err - } - - return IsAPINamespacedWithGVK(gvk, scheme, restmapper) -} - -// IsAPINamespacedWithGVK returns true if the object having the provided -// GVK is namespace scoped. -func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { - restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) - if err != nil { - return false, fmt.Errorf("failed to get restmapping: %w", err) - } - - scope := restmapping.Scope.Name() - - if scope == "" { - return false, errors.New("scope cannot be identified, empty scope returned") - } - - if scope != apimeta.RESTScopeNameRoot { - return true, nil - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go index 9d8b2f07..21f0146b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go @@ -19,6 +19,7 @@ package recorder import ( "context" "fmt" + "net/http" "sync" "github.com/go-logr/logr" @@ -110,8 +111,12 @@ func (p *Provider) getBroadcaster() record.EventBroadcaster { } // NewProvider create a new Provider instance. -func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { - corev1Client, err := corev1client.NewForConfig(config) +func NewProvider(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { + if httpClient == nil { + panic("httpClient must not be nil") + } + + corev1Client, err := corev1client.NewForConfigAndClient(config, httpClient) if err != nil { return nil, fmt.Errorf("failed to init client: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go similarity index 67% rename from vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go index f0cfe212..ae8404a1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go @@ -17,6 +17,7 @@ limitations under the License. package internal import ( + "context" "fmt" "k8s.io/client-go/tools/cache" @@ -31,17 +32,39 @@ import ( var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") -var _ cache.ResourceEventHandler = EventHandler{} +// NewEventHandler creates a new EventHandler. 
+func NewEventHandler(ctx context.Context, queue workqueue.RateLimitingInterface, handler handler.EventHandler, predicates []predicate.Predicate) *EventHandler { + return &EventHandler{ + ctx: ctx, + handler: handler, + queue: queue, + predicates: predicates, + } +} // EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. type EventHandler struct { - EventHandler handler.EventHandler - Queue workqueue.RateLimitingInterface - Predicates []predicate.Predicate + // ctx stores the context that created the event handler + // that is used to propagate cancellation signals to each handler function. + ctx context.Context + + handler handler.EventHandler + queue workqueue.RateLimitingInterface + predicates []predicate.Predicate +} + +// HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs +// TODO: switch to ResourceEventHandlerDetailedFuncs with client-go 1.27 +func (e *EventHandler) HandlerFuncs() cache.ResourceEventHandlerFuncs { + return cache.ResourceEventHandlerFuncs{ + AddFunc: e.OnAdd, + UpdateFunc: e.OnUpdate, + DeleteFunc: e.OnDelete, + } } // OnAdd creates CreateEvent and calls Create on EventHandler. -func (e EventHandler) OnAdd(obj interface{}) { +func (e *EventHandler) OnAdd(obj interface{}) { c := event.CreateEvent{} // Pull Object out of the object @@ -53,18 +76,20 @@ func (e EventHandler) OnAdd(obj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Create(c) { return } } // Invoke create handler - e.EventHandler.Create(c, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Create(ctx, c, e.queue) } // OnUpdate creates UpdateEvent and calls Update on EventHandler. -func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { +func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) { u := event.UpdateEvent{} if o, ok := oldObj.(client.Object); ok { @@ -84,18 +109,20 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Update(u) { return } } // Invoke update handler - e.EventHandler.Update(u, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Update(ctx, u, e.queue) } // OnDelete creates DeleteEvent and calls Delete on EventHandler. -func (e EventHandler) OnDelete(obj interface{}) { +func (e *EventHandler) OnDelete(obj interface{}) { d := event.DeleteEvent{} // Deal with tombstone events by pulling the object out. 
Tombstone events wrap the object in a @@ -114,6 +141,9 @@ func (e EventHandler) OnDelete(obj interface{}) { return } + // Set DeleteStateUnknown to true + d.DeleteStateUnknown = true + // Set obj to the tombstone obj obj = tombstone.Obj } @@ -127,12 +157,14 @@ func (e EventHandler) OnDelete(obj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Delete(d) { return } } // Invoke delete handler - e.EventHandler.Delete(d, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Delete(ctx, d, e.queue) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go new file mode 100644 index 00000000..b3a82271 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go @@ -0,0 +1,117 @@ +package internal + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). +type Kind struct { + // Type is the type of object to watch. e.g. &v1.Pod{} + Type client.Object + + // Cache used to watch APIs + Cache cache.Cache + + // started may contain an error if one was encountered during startup. If its closed and does not + // contain an error, startup and syncing finished. + started chan error + startCancel func() +} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. +func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + if ks.Type == nil { + return fmt.Errorf("must create Kind with a non-nil object") + } + if ks.Cache == nil { + return fmt.Errorf("must create Kind with a non-nil cache") + } + + // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not + // sync that informer (most commonly due to RBAC issues). + ctx, ks.startCancel = context.WithCancel(ctx) + ks.started = make(chan error) + go func() { + var ( + i cache.Informer + lastErr error + ) + + // Tries to get an informer until it returns true, + // an error or the specified context is cancelled or expired. + if err := wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) { + // Lookup the Informer from the Cache and add an EventHandler which populates the Queue + i, lastErr = ks.Cache.GetInformer(ctx, ks.Type) + if lastErr != nil { + kindMatchErr := &meta.NoKindMatchError{} + switch { + case errors.As(lastErr, &kindMatchErr): + log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", + "kind", kindMatchErr.GroupKind) + case runtime.IsNotRegisteredError(lastErr): + log.Error(lastErr, "kind must be registered to the Scheme") + default: + log.Error(lastErr, "failed to get informer from cache") + } + return false, nil // Retry. 
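A minimal sketch of what a custom handler.EventHandler looks like once every handler method receives a context; the parameter order is inferred from the e.handler.Create/Update/Delete calls in the hunk above, and the enqueueOwner type is purely illustrative.

package example

import (
	"context"

	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

type enqueueOwner struct{}

// Create receives the context derived from the watch's context in the event handler above.
func (h *enqueueOwner) Create(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
	q.Add(reconcile.Request{}) // a real handler would compute the request key from e.Object
}

func (h *enqueueOwner) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {}

func (h *enqueueOwner) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {}

func (h *enqueueOwner) Generic(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) {}

var _ handler.EventHandler = &enqueueOwner{}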
+ } + return true, nil + }); err != nil { + if lastErr != nil { + ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) + return + } + ks.started <- err + return + } + + _, err := i.AddEventHandler(NewEventHandler(ctx, queue, handler, prct).HandlerFuncs()) + if err != nil { + ks.started <- err + return + } + if !ks.Cache.WaitForCacheSync(ctx) { + // Would be great to return something more informative here + ks.started <- errors.New("cache did not sync") + } + close(ks.started) + }() + + return nil +} + +func (ks *Kind) String() string { + if ks.Type != nil { + return fmt.Sprintf("kind source: %T", ks.Type) + } + return "kind source: unknown type" +} + +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +func (ks *Kind) WaitForSync(ctx context.Context) error { + select { + case err := <-ks.started: + return err + case <-ctx.Done(): + ks.startCancel() + if errors.Is(ctx.Err(), context.Canceled) { + return nil + } + return fmt.Errorf("timed out waiting for cache to be synced for Kind %T", ks.Type) + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go index c82447d9..c27b4305 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -25,7 +25,7 @@ import ( // loggerPromise knows how to populate a concrete logr.Logger // with options, given an actual base logger later on down the line. type loggerPromise struct { - logger *DelegatingLogSink + logger *delegatingLogSink childPromises []*loggerPromise promisesLock sync.Mutex @@ -33,7 +33,7 @@ type loggerPromise struct { tags []interface{} } -func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise { +func (p *loggerPromise) WithName(l *delegatingLogSink, name string) *loggerPromise { res := &loggerPromise{ logger: l, name: &name, @@ -47,7 +47,7 @@ func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromi } // WithValues provides a new Logger with the tags appended. -func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise { +func (p *loggerPromise) WithValues(l *delegatingLogSink, tags ...interface{}) *loggerPromise { res := &loggerPromise{ logger: l, tags: tags, @@ -84,12 +84,12 @@ func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { } } -// DelegatingLogSink is a logsink that delegates to another logr.LogSink. +// delegatingLogSink is a logsink that delegates to another logr.LogSink. // If the underlying promise is not nil, it registers calls to sub-loggers with // the logging factory to be populated later, and returns a new delegating // logger. It expects to have *some* logr.Logger set at all times (generally // a no-op logger before the promises are fulfilled). -type DelegatingLogSink struct { +type delegatingLogSink struct { lock sync.RWMutex logger logr.LogSink promise *loggerPromise @@ -97,7 +97,8 @@ type DelegatingLogSink struct { } // Init implements logr.LogSink. -func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { +func (l *delegatingLogSink) Init(info logr.RuntimeInfo) { + eventuallyFulfillRoot() l.lock.Lock() defer l.lock.Unlock() l.info = info @@ -106,7 +107,8 @@ func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info // logs. 
-func (l *DelegatingLogSink) Enabled(level int) bool { +func (l *delegatingLogSink) Enabled(level int) bool { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() return l.logger.Enabled(level) @@ -118,7 +120,8 @@ func (l *DelegatingLogSink) Enabled(level int) bool { // the log line. The key/value pairs can then be used to add additional // variable information. The key/value pairs should alternate string // keys and arbitrary values. -func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { +func (l *delegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() l.logger.Info(level, msg, keysAndValues...) @@ -132,14 +135,16 @@ func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interfa // The msg field should be used to add context to any underlying error, // while the err field should be used to attach the actual error that // triggered this log line, if present. -func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { +func (l *delegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() l.logger.Error(err, msg, keysAndValues...) } // WithName provides a new Logger with the name appended. -func (l *DelegatingLogSink) WithName(name string) logr.LogSink { +func (l *delegatingLogSink) WithName(name string) logr.LogSink { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() @@ -151,7 +156,7 @@ func (l *DelegatingLogSink) WithName(name string) logr.LogSink { return sink } - res := &DelegatingLogSink{logger: l.logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithName(res, name) res.promise = promise @@ -159,7 +164,8 @@ func (l *DelegatingLogSink) WithName(name string) logr.LogSink { } // WithValues provides a new Logger with the tags appended. -func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { +func (l *delegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() @@ -171,7 +177,7 @@ func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { return sink } - res := &DelegatingLogSink{logger: l.logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithValues(res, tags...) res.promise = promise @@ -181,16 +187,16 @@ func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { // Fulfill switches the logger over to use the actual logger // provided, instead of the temporary initial one, if this method // has not been previously called. -func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) { +func (l *delegatingLogSink) Fulfill(actual logr.LogSink) { if l.promise != nil { l.promise.Fulfill(actual) } } -// NewDelegatingLogSink constructs a new DelegatingLogSink which uses +// newDelegatingLogSink constructs a new DelegatingLogSink which uses // the given logger before its promise is fulfilled. 
-func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink { - l := &DelegatingLogSink{ +func newDelegatingLogSink(initial logr.LogSink) *delegatingLogSink { + l := &delegatingLogSink{ logger: initial, promise: &loggerPromise{promisesLock: sync.Mutex{}}, } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go index 082dce3a..a79151c6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -35,7 +35,10 @@ package log import ( "context" - "sync" + "fmt" + "os" + "runtime/debug" + "sync/atomic" "time" "github.com/go-logr/logr" @@ -43,35 +46,24 @@ import ( // SetLogger sets a concrete logging implementation for all deferred Loggers. func SetLogger(l logr.Logger) { - loggerWasSetLock.Lock() - defer loggerWasSetLock.Unlock() - - loggerWasSet = true - dlog.Fulfill(l.GetSink()) + logFullfilled.Store(true) + rootLog.Fulfill(l.GetSink()) } -// It is safe to assume that if this wasn't set within the first 30 seconds of a binaries -// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory -// allocations when not given an actual Logger, so we set a NullLogSink to avoid that. -// -// We need to keep the DelegatingLogSink because we have various inits() that get a logger from -// here. They will always get executed before any code that imports controller-runtime -// has a chance to run and hence to set an actual logger. -func init() { - // Init is blocking, so start a new goroutine - go func() { - time.Sleep(30 * time.Second) - loggerWasSetLock.Lock() - defer loggerWasSetLock.Unlock() - if !loggerWasSet { - dlog.Fulfill(NullLogSink{}) +func eventuallyFulfillRoot() { + if logFullfilled.Load() { + return + } + if time.Since(rootLogCreated).Seconds() >= 30 { + if logFullfilled.CompareAndSwap(false, true) { + fmt.Fprintf(os.Stderr, "[controller-runtime] log.SetLogger(...) was never called, logs will not be displayed:\n%s", debug.Stack()) + SetLogger(logr.New(NullLogSink{})) } - }() + } } var ( - loggerWasSetLock sync.Mutex - loggerWasSet bool + logFullfilled atomic.Bool ) // Log is the base logger used by kubebuilder. It delegates @@ -80,8 +72,10 @@ var ( // the first 30 seconds of a binaries lifetime, it will get // set to a NullLogSink. var ( - dlog = NewDelegatingLogSink(NullLogSink{}) - Log = logr.New(dlog) + rootLog, rootLogCreated = func() (*delegatingLogSink, time.Time) { + return newDelegatingLogSink(NullLogSink{}), time.Now() + }() + Log = logr.New(rootLog) ) // FromContext returns a logger with predefined values from a context.Context. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go index 98244702..3b4ebfda 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go @@ -24,7 +24,6 @@ import ( "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" ) // KubeAwareEncoder is a Kubernetes-aware Zap Encoder. @@ -41,21 +40,6 @@ type KubeAwareEncoder struct { Verbose bool } -// namespacedNameWrapper is a zapcore.ObjectMarshaler for Kubernetes NamespacedName. 
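A minimal sketch of the SetLogger contract implied by the log.go change below: if SetLogger is never called, the first log call made roughly 30 seconds after startup prints a "log.SetLogger(...) was never called" warning to stderr and switches to a null sink, so a real logger should be installed early in main. zap.New is assumed to be the usual constructor from sigs.k8s.io/controller-runtime/pkg/log/zap.

package main

import (
	ctrllog "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap" // assumed helper package; only its New() is used here
)

func main() {
	// Install a concrete logr.Logger before any controller-runtime code starts logging.
	ctrllog.SetLogger(zap.New())

	ctrllog.Log.Info("logger installed") // delegates to the zap-backed sink
}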
-type namespacedNameWrapper struct { - types.NamespacedName -} - -func (w namespacedNameWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error { - if w.Namespace != "" { - enc.AddString("namespace", w.Namespace) - } - - enc.AddString("name", w.Name) - - return nil -} - // kubeObjectWrapper is a zapcore.ObjectMarshaler for Kubernetes objects. type kubeObjectWrapper struct { obj runtime.Object @@ -119,12 +103,6 @@ func (k *KubeAwareEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Fie Key: field.Key, Interface: kubeObjectWrapper{obj: val}, } - case types.NamespacedName: - fields[i] = zapcore.Field{ - Type: zapcore.ObjectMarshalerType, - Key: field.Key, - Interface: namespacedNameWrapper{NamespacedName: val}, - } } } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 5ccff8b7..f298229e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -18,11 +18,11 @@ package manager import ( "context" - "crypto/tls" "errors" "fmt" "net" "net/http" + "net/http/pprof" "sync" "sync/atomic" "time" @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" @@ -41,12 +40,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" - "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" "sigs.k8s.io/controller-runtime/pkg/metrics" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -107,8 +105,11 @@ type controllerManager struct { // Healthz probe handler healthzHandler *healthz.Handler - // controllerOptions are the global controller options. - controllerOptions v1alpha1.ControllerConfigurationSpec + // pprofListener is used to serve pprof + pprofListener net.Listener + + // controllerConfig are the global controller options. + controllerConfig config.Controller // Logger is the logger that should be used by this manager. // If none is set, it defaults to log.Log global logger. @@ -128,18 +129,7 @@ type controllerManager struct { // election was configured. elected chan struct{} - // port is the port that the webhook server serves at. - port int - // host is the hostname that the webhook server binds to. - host string - // CertDir is the directory that contains the server key and certificate. - // if not set, webhook server would look up the server key and certificate in - // {TempDir}/k8s-webhook-server/serving-certs - certDir string - // tlsOpts is used to allow configuring the TLS config used for the webhook server. - tlsOpts []func(*tls.Config) - - webhookServer *webhook.Server + webhookServer webhook.Server // webhookServerOnce will be called in GetWebhookServer() to optionally initialize // webhookServer if unset, and Add() it to controllerManager. 
webhookServerOnce sync.Once @@ -191,31 +181,9 @@ func (cm *controllerManager) Add(r Runnable) error { } func (cm *controllerManager) add(r Runnable) error { - // Set dependencies on the object - if err := cm.SetFields(r); err != nil { - return err - } return cm.runnables.Add(r) } -// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. -func (cm *controllerManager) SetFields(i interface{}) error { - if err := cm.cluster.SetFields(i); err != nil { - return err - } - if _, err := inject.InjectorInto(cm.SetFields, i); err != nil { - return err - } - if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil { - return err - } - if _, err := inject.LoggerInto(cm.logger, i); err != nil { - return err - } - - return nil -} - // AddMetricsExtraHandler adds extra handler served on path to the http server that serves metrics. func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Handler) error { cm.Lock() @@ -272,6 +240,10 @@ func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) return nil } +func (cm *controllerManager) GetHTTPClient() *http.Client { + return cm.cluster.GetHTTPClient() +} + func (cm *controllerManager) GetConfig() *rest.Config { return cm.cluster.GetConfig() } @@ -304,15 +276,10 @@ func (cm *controllerManager) GetAPIReader() client.Reader { return cm.cluster.GetAPIReader() } -func (cm *controllerManager) GetWebhookServer() *webhook.Server { +func (cm *controllerManager) GetWebhookServer() webhook.Server { cm.webhookServerOnce.Do(func() { if cm.webhookServer == nil { - cm.webhookServer = &webhook.Server{ - Port: cm.port, - Host: cm.host, - CertDir: cm.certDir, - TLSOpts: cm.tlsOpts, - } + panic("webhook should not be nil") } if err := cm.Add(cm.webhookServer); err != nil { panic(fmt.Sprintf("unable to add webhook server to the controller manager: %s", err)) @@ -325,23 +292,29 @@ func (cm *controllerManager) GetLogger() logr.Logger { return cm.logger } -func (cm *controllerManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec { - return cm.controllerOptions +func (cm *controllerManager) GetControllerOptions() config.Controller { + return cm.controllerConfig } -func (cm *controllerManager) serveMetrics() { +func (cm *controllerManager) addMetricsServer() error { + mux := http.NewServeMux() + srv := httpserver.New(mux) + handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ ErrorHandling: promhttp.HTTPErrorOnError, }) // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics - mux := http.NewServeMux() mux.Handle(defaultMetricsEndpoint, handler) for path, extraHandler := range cm.metricsExtraHandlers { mux.Handle(path, extraHandler) } - server := httpserver.New(mux) - go cm.httpServe("metrics", cm.logger.WithValues("path", defaultMetricsEndpoint), server, cm.metricsListener) + return cm.add(&server{ + Kind: "metrics", + Log: cm.logger.WithValues("path", defaultMetricsEndpoint), + Server: srv, + Listener: cm.metricsListener, + }) } func (cm *controllerManager) serveHealthProbes() { @@ -362,6 +335,24 @@ func (cm *controllerManager) serveHealthProbes() { go cm.httpServe("health probe", cm.logger, server, cm.healthProbeListener) } +func (cm *controllerManager) addPprofServer() error { + mux := http.NewServeMux() + srv := httpserver.New(mux) + + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + 
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + + return cm.add(&server{ + Kind: "pprof", + Log: cm.logger, + Server: srv, + Listener: cm.pprofListener, + }) +} + func (cm *controllerManager) httpServe(kind string, log logr.Logger, server *http.Server, ln net.Listener) { log = log.WithValues("kind", kind, "addr", ln.Addr()) @@ -451,7 +442,9 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // (If we don't serve metrics for non-leaders, prometheus will still scrape // the pod but will get a connection refused). if cm.metricsListener != nil { - cm.serveMetrics() + if err := cm.addMetricsServer(); err != nil { + return fmt.Errorf("failed to add metrics server: %w", err) + } } // Serve health probes. @@ -459,6 +452,13 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { cm.serveHealthProbes() } + // Add pprof server + if cm.pprofListener != nil { + if err := cm.addPprofServer(); err != nil { + return fmt.Errorf("failed to add pprof server: %w", err) + } + } + // First start any webhook servers, which includes conversion, validation, and defaulting // webhooks that are registered. // @@ -466,22 +466,22 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // between conversion webhooks and the cache sync (usually initial list) which causes the webhooks // to never start because no cache can be populated. if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start webhooks: %w", err) } } // Start and wait for caches. if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start caches: %w", err) } } // Start the non-leaderelection Runnables after the cache has synced. if err := cm.runnables.Others.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start other runnables: %w", err) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 2facb1c9..72a4a780 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -19,6 +19,7 @@ package manager import ( "context" "crypto/tls" + "errors" "fmt" "net" "net/http" @@ -33,6 +34,7 @@ import ( "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" @@ -44,7 +46,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/recorder" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -55,8 +56,7 @@ type Manager interface { cluster.Cluster // Add will set requested dependencies on the component, and cause the component to be - // started when Start is called. Add will inject any dependencies for which the argument - // implements the inject interface - e.g. inject.Client. + // started when Start is called. 
// Depending on if a Runnable implements LeaderElectionRunnable interface, a Runnable can be run in either // non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled). Add(Runnable) error @@ -88,13 +88,13 @@ type Manager interface { Start(ctx context.Context) error // GetWebhookServer returns a webhook.Server - GetWebhookServer() *webhook.Server + GetWebhookServer() webhook.Server // GetLogger returns this manager's logger. GetLogger() logr.Logger // GetControllerOptions returns controller global configuration options. - GetControllerOptions() v1alpha1.ControllerConfigurationSpec + GetControllerOptions() config.Controller } // Options are the arguments for creating a new Manager. @@ -102,10 +102,44 @@ type Options struct { // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources. // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better // to pass your own scheme in. See the documentation in pkg/scheme for more information. + // + // If set, the Scheme will be used to create the default Client and Cache. Scheme *runtime.Scheme - // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs - MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs. + // + // If set, the RESTMapper returned by this function is used to create the RESTMapper + // used by the Client and Cache. + MapperProvider func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) + + // Cache is the cache.Options that will be used to create the default Cache. + // By default, the cache will watch and list requested objects in all namespaces. + Cache cache.Options + + // NewCache is the function that will create the cache to be used + // by the manager. If not set this will use the default new cache function. + // + // When using a custom NewCache, the Cache options will be passed to the + // NewCache function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewCache if you know what you are doing. + NewCache cache.NewCacheFunc + + // Client is the client.Options that will be used to create the default Client. + // By default, the client will use the cache for reads and direct calls for writes. + Client client.Options + + // NewClient is the func that creates the client to be used by the manager. + // If not set this will create a Client backed by a Cache for read operations + // and a direct Client for write operations. + // + // When using a custom NewClient, the Client options will be passed to the + // NewClient function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewClient if you know what you are doing. + NewClient client.NewClientFunc // SyncPeriod determines the minimum frequency at which watched resources are // reconciled. A lower period will correct entropy more quickly, but reduce @@ -132,6 +166,8 @@ type Options struct { // is "done" with an object, and would otherwise not requeue it, i.e., we // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, // instead of `reconcile.Result{}`. + // + // Deprecated: Use Cache.SyncPeriod instead. SyncPeriod *time.Duration // Logger is the logger that should be used by this manager. @@ -217,6 +253,8 @@ type Options struct { // Note: If a namespace is specified, controllers can still Watch for a // cluster-scoped resource (e.g Node). 
For namespaced resources, the cache // will only hold objects from the desired namespace. + // + // Deprecated: Use Cache.Namespaces instead. Namespace string // MetricsBindAddress is the TCP address that the controller should bind to @@ -235,11 +273,22 @@ type Options struct { // Liveness probe endpoint name, defaults to "healthz" LivenessEndpointName string + // PprofBindAddress is the TCP address that the controller should bind to + // for serving pprof. + // It can be set to "" or "0" to disable the pprof serving. + // Since pprof may contain sensitive information, make sure to protect it + // before exposing it to public. + PprofBindAddress string + // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. Port int // Host is the hostname that the webhook server binds to. // It is used to set webhook.Server.Host if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. Host string // CertDir is the directory that contains the server key and certificate. @@ -247,26 +296,19 @@ type Options struct { // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate // must be named tls.key and tls.crt, respectively. // It is used to set webhook.Server.CertDir if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. CertDir string // TLSOpts is used to allow configuring the TLS config used for the webhook server. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. TLSOpts []func(*tls.Config) // WebhookServer is an externally configured webhook.Server. By default, // a Manager will create a default server using Port, Host, and CertDir; // if this is set, the Manager will use this server instead. - WebhookServer *webhook.Server - - // Functions to allow for a user to customize values that will be injected. - - // NewCache is the function that will create the cache to be used - // by the manager. If not set this will use the default new cache function. - NewCache cache.NewCacheFunc - - // NewClient is the func that creates the client to be used by the manager. - // If not set this will create the default DelegatingClient that will - // use the cache for reads and the client for writes. - NewClient cluster.NewClientFunc + WebhookServer webhook.Server // BaseContext is the function that provides Context values to Runnables // managed by the Manager. If a BaseContext function isn't provided, Runnables @@ -275,10 +317,14 @@ type Options struct { // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it // for the given objects. + // + // Deprecated: Use Client.Cache.DisableCacheFor instead. ClientDisableCacheFor []client.Object // DryRunClient specifies whether the client should be configured to enforce // dryRun mode. + // + // Deprecated: Use Client.DryRun instead. DryRunClient bool // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API @@ -297,7 +343,7 @@ type Options struct { // Controller contains global configuration options for controllers // registered within this manager. 
// +optional - Controller v1alpha1.ControllerConfigurationSpec + Controller config.Controller // makeBroadcaster allows deferring the creation of the broadcaster to // avoid leaking goroutines if we never call Start on this manager. It also @@ -306,10 +352,11 @@ type Options struct { makeBroadcaster intrec.EventBroadcasterProducer // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) newMetricsListener func(addr string) (net.Listener, error) newHealthProbeListener func(addr string) (net.Listener, error) + newPprofListener func(addr string) (net.Listener, error) } // BaseContextFunc is a function used to provide a base Context to Runnables @@ -345,6 +392,9 @@ type LeaderElectionRunnable interface { // New returns a new Manager for creating Controllers. func New(config *rest.Config, options Options) (Manager, error) { + if config == nil { + return nil, errors.New("must specify Config") + } // Set default values for options fields options = setOptionsDefaults(options) @@ -353,21 +403,28 @@ func New(config *rest.Config, options Options) (Manager, error) { clusterOptions.MapperProvider = options.MapperProvider clusterOptions.Logger = options.Logger clusterOptions.SyncPeriod = options.SyncPeriod - clusterOptions.Namespace = options.Namespace clusterOptions.NewCache = options.NewCache clusterOptions.NewClient = options.NewClient - clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor - clusterOptions.DryRunClient = options.DryRunClient - clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck + clusterOptions.Cache = options.Cache + clusterOptions.Client = options.Client + clusterOptions.Namespace = options.Namespace //nolint:staticcheck + clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor //nolint:staticcheck + clusterOptions.DryRunClient = options.DryRunClient //nolint:staticcheck + clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck }) if err != nil { return nil, err } + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. 
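A minimal sketch of constructing a Manager against the reshaped Options above: the webhook server is now passed in as a ready-made webhook.Server (replacing the deprecated Port/Host/CertDir/TLSOpts fields), and pprof gets its own bind address. cfg and scheme are assumed to come from the caller; Cache and Client options are left at their defaults.

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func newManager(cfg *rest.Config, scheme *runtime.Scheme) (manager.Manager, error) {
	return manager.New(cfg, manager.Options{
		Scheme: scheme,
		// Replaces the deprecated Options.Port/Host/CertDir/TLSOpts fields.
		WebhookServer: webhook.NewServer(webhook.Options{
			Port: 9443,
		}),
		// New in this release: serve /debug/pprof/* on a dedicated listener.
		PprofBindAddress: ":8082",
	})
}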
- recorderProvider, err := options.newRecorderProvider(config, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + recorderProvider, err := options.newRecorderProvider(config, cluster.GetHTTPClient(), cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } @@ -381,7 +438,7 @@ func New(config *rest.Config, options Options) (Manager, error) { leaderRecorderProvider = recorderProvider } else { leaderConfig = rest.CopyConfig(options.LeaderElectionConfig) - leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetHTTPClient(), cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } @@ -419,6 +476,13 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } + // Create pprof listener. This will throw an error if the bind + // address is invalid or already in use. + pprofListener, err := options.newPprofListener(options.PprofBindAddress) + if err != nil { + return nil, fmt.Errorf("failed to new pprof listener: %w", err) + } + errChan := make(chan error) runnables := newRunnables(options.BaseContext, errChan) @@ -431,13 +495,9 @@ func New(config *rest.Config, options Options) (Manager, error) { resourceLock: resourceLock, metricsListener: metricsListener, metricsExtraHandlers: metricsExtraHandlers, - controllerOptions: options.Controller, + controllerConfig: options.Controller, logger: options.Logger, elected: make(chan struct{}), - port: options.Port, - host: options.Host, - certDir: options.CertDir, - tlsOpts: options.TLSOpts, webhookServer: options.WebhookServer, leaderElectionID: options.LeaderElectionID, leaseDuration: *options.LeaseDuration, @@ -446,6 +506,7 @@ func New(config *rest.Config, options Options) (Manager, error) { healthProbeListener: healthProbeListener, readinessEndpointName: options.ReadinessEndpointName, livenessEndpointName: options.LivenessEndpointName, + pprofListener: pprofListener, gracefulShutdownTimeout: *options.GracefulShutdownTimeout, internalProceduresStop: make(chan struct{}), leaderElectionStopped: make(chan struct{}), @@ -456,14 +517,14 @@ func New(config *rest.Config, options Options) (Manager, error) { // AndFrom will use a supplied type and convert to Options // any options already set on Options will be ignored, this is used to allow // cli flags to override anything specified in the config file. +// +// Deprecated: This function has been deprecated and will be removed in a future release, +// The Component Configuration package has been unmaintained for over a year and is no longer +// actively developed. Users should migrate to their own configuration format +// and configure Manager.Options directly. +// See https://github.com/kubernetes-sigs/controller-runtime/issues/895 +// for more information, feedback, and comments. 
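A minimal sketch of reusing the manager's HTTP client when building an extra typed client, mirroring what the recorder provider now does with NewForConfigAndClient above; it assumes the Manager interface exposes GetHTTPClient via the embedded cluster.Cluster in this release.

package example

import (
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func coreClientFor(mgr manager.Manager) (*corev1client.CoreV1Client, error) {
	// Share the manager's rest.Config and *http.Client instead of creating a new transport.
	return corev1client.NewForConfigAndClient(mgr.GetConfig(), mgr.GetHTTPClient())
}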
func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) { - if inj, wantsScheme := loader.(inject.Scheme); wantsScheme { - err := inj.InjectScheme(o.Scheme) - if err != nil { - return o, err - } - } - newObj, err := loader.Complete() if err != nil { return o, err @@ -498,18 +559,23 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, if o.Port == 0 && newObj.Webhook.Port != nil { o.Port = *newObj.Webhook.Port } - if o.Host == "" && newObj.Webhook.Host != "" { o.Host = newObj.Webhook.Host } - if o.CertDir == "" && newObj.Webhook.CertDir != "" { o.CertDir = newObj.Webhook.CertDir } + if o.WebhookServer == nil { + o.WebhookServer = webhook.NewServer(webhook.Options{ + Port: o.Port, + Host: o.Host, + CertDir: o.CertDir, + }) + } if newObj.Controller != nil { - if o.Controller.CacheSyncTimeout == nil && newObj.Controller.CacheSyncTimeout != nil { - o.Controller.CacheSyncTimeout = newObj.Controller.CacheSyncTimeout + if o.Controller.CacheSyncTimeout == 0 && newObj.Controller.CacheSyncTimeout != nil { + o.Controller.CacheSyncTimeout = *newObj.Controller.CacheSyncTimeout } if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 { @@ -521,6 +587,13 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, } // AndFromOrDie will use options.AndFrom() and will panic if there are errors. +// +// Deprecated: This function has been deprecated and will be removed in a future release, +// The Component Configuration package has been unmaintained for over a year and is no longer +// actively developed. Users should migrate to their own configuration format +// and configure Manager.Options directly. +// See https://github.com/kubernetes-sigs/controller-runtime/issues/895 +// for more information, feedback, and comments. func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options { o, err := o.AndFrom(loader) if err != nil { @@ -579,6 +652,19 @@ func defaultHealthProbeListener(addr string) (net.Listener, error) { return ln, nil } +// defaultPprofListener creates the default pprof listener bound to the given address. +func defaultPprofListener(addr string) (net.Listener, error) { + if addr == "" || addr == "0" { + return nil, nil + } + + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("error listening on %s: %w", addr, err) + } + return ln, nil +} + // defaultBaseContext is used as the BaseContext value in Options if one // has not already been set. 
func defaultBaseContext() context.Context { @@ -639,6 +725,10 @@ func setOptionsDefaults(options Options) Options { options.newHealthProbeListener = defaultHealthProbeListener } + if options.newPprofListener == nil { + options.newPprofListener = defaultPprofListener + } + if options.GracefulShutdownTimeout == nil { gracefulShutdownTimeout := defaultGracefulShutdownPeriod options.GracefulShutdownTimeout = &gracefulShutdownTimeout @@ -652,5 +742,14 @@ func setOptionsDefaults(options Options) Options { options.BaseContext = defaultBaseContext } + if options.WebhookServer == nil { + options.WebhookServer = webhook.NewServer(webhook.Options{ + Host: options.Host, + Port: options.Port, + CertDir: options.CertDir, + TLSOpts: options.TLSOpts, + }) + } + return options } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go index f7b91a20..549741e6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go @@ -56,7 +56,7 @@ func (r *runnables) Add(fn Runnable) error { return r.Caches.Add(fn, func(ctx context.Context) bool { return runnable.GetCache().WaitForCacheSync(ctx) }) - case *webhook.Server: + case webhook.Server: return r.Webhooks.Add(fn, nil) case LeaderElectionRunnable: if !runnable.NeedLeaderElection() { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go new file mode 100644 index 00000000..b6509f48 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manager + +import ( + "context" + "errors" + "net" + "net/http" + + "github.com/go-logr/logr" +) + +// server is a general purpose HTTP server Runnable for a manager +// to serve some internal handlers such as health probes, metrics and profiling. 
+type server struct { + Kind string + Log logr.Logger + Server *http.Server + Listener net.Listener +} + +func (s *server) Start(ctx context.Context) error { + log := s.Log.WithValues("kind", s.Kind, "addr", s.Listener.Addr()) + + serverShutdown := make(chan struct{}) + go func() { + <-ctx.Done() + log.Info("shutting down server") + if err := s.Server.Shutdown(context.Background()); err != nil { + log.Error(err, "error shutting down server") + } + close(serverShutdown) + }() + + log.Info("starting server") + if err := s.Server.Serve(s.Listener); err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + + <-serverShutdown + return nil +} + +func (s *server) NeedLeaderElection() bool { + return false +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go index a8b43ea0..ff28998c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -18,8 +18,6 @@ package metrics import ( "context" - "net/url" - "time" "github.com/prometheus/client_golang/prometheus" clientmetrics "k8s.io/client-go/tools/metrics" @@ -29,70 +27,9 @@ import ( // that client-go registers metrics. We copy the names and formats // from Kubernetes so that we match the core controllers. -// Metrics subsystem and all of the keys used by the rest client. -const ( - RestClientSubsystem = "rest_client" - LatencyKey = "request_latency_seconds" - ResultKey = "requests_total" -) - var ( // client metrics. - // RequestLatency reports the request latency in seconds per verb/URL. - // Deprecated: This metric is deprecated for removal in a future release: using the URL as a - // dimension results in cardinality explosion for some consumers. It was deprecated upstream - // in k8s v1.14 and hidden in v1.17 via https://github.com/kubernetes/kubernetes/pull/83836. - // It is not registered by default. To register: - // import ( - // clientmetrics "k8s.io/client-go/tools/metrics" - // clmetrics "sigs.k8s.io/controller-runtime/metrics" - // ) - // - // func init() { - // clmetrics.Registry.MustRegister(clmetrics.RequestLatency) - // clientmetrics.Register(clientmetrics.RegisterOpts{ - // RequestLatency: clmetrics.LatencyAdapter - // }) - // } - RequestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: RestClientSubsystem, - Name: LatencyKey, - Help: "Request latency in seconds. Broken down by verb and URL.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), - }, []string{"verb", "url"}) - - // requestLatency is a Prometheus Histogram metric type partitioned by - // "verb", and "host" labels. It is used for the rest client latency metrics. - requestLatency = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_duration_seconds", - Help: "Request latency in seconds. Broken down by verb, and host.", - Buckets: []float64{0.005, 0.025, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 15.0, 30.0, 60.0}, - }, - []string{"verb", "host"}, - ) - - requestSize = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_size_bytes", - Help: "Request size in bytes. 
Broken down by verb and host.", - // 64 bytes to 16MB - Buckets: []float64{64, 256, 512, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216}, - }, - []string{"verb", "host"}, - ) - - responseSize = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_response_size_bytes", - Help: "Response size in bytes. Broken down by verb and host.", - // 64 bytes to 16MB - Buckets: []float64{64, 256, 512, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216}, - }, - []string{"verb", "host"}, - ) - requestResult = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "rest_client_requests_total", @@ -109,17 +46,11 @@ func init() { // registerClientMetrics sets up the client latency metrics from client-go. func registerClientMetrics() { // register the metrics with our registry - Registry.MustRegister(requestLatency) - Registry.MustRegister(requestSize) - Registry.MustRegister(responseSize) Registry.MustRegister(requestResult) // register the metrics with client-go clientmetrics.Register(clientmetrics.RegisterOpts{ - RequestLatency: &LatencyAdapter{metric: requestLatency}, - RequestSize: &sizeAdapter{metric: requestSize}, - ResponseSize: &sizeAdapter{metric: responseSize}, - RequestResult: &resultAdapter{metric: requestResult}, + RequestResult: &resultAdapter{metric: requestResult}, }) } @@ -131,24 +62,6 @@ func registerClientMetrics() { // copied (more-or-less directly) from k8s.io/kubernetes setup code // (which isn't anywhere in an easily-importable place). -// LatencyAdapter implements LatencyMetric. -type LatencyAdapter struct { - metric *prometheus.HistogramVec -} - -// Observe increments the request latency metric for the given verb/URL. -func (l *LatencyAdapter) Observe(_ context.Context, verb string, u url.URL, latency time.Duration) { - l.metric.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) -} - -type sizeAdapter struct { - metric *prometheus.HistogramVec -} - -func (s *sizeAdapter) Observe(ctx context.Context, verb string, host string, size float64) { - s.metric.WithLabelValues(verb, host).Observe(size) -} - type resultAdapter struct { metric *prometheus.CounterVec } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go index 8b0f3634..31463587 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -24,7 +24,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") @@ -242,15 +241,6 @@ type and struct { predicates []Predicate } -func (a and) InjectFunc(f inject.Func) error { - for _, p := range a.predicates { - if err := f(p); err != nil { - return err - } - } - return nil -} - func (a and) Create(e event.CreateEvent) bool { for _, p := range a.predicates { if !p.Create(e) { @@ -296,15 +286,6 @@ type or struct { predicates []Predicate } -func (o or) InjectFunc(f inject.Func) error { - for _, p := range o.predicates { - if err := f(p); err != nil { - return err - } - } - return nil -} - func (o or) Create(e event.CreateEvent) bool { for _, p := range o.predicates { if p.Create(e) { @@ -350,10 +331,6 @@ type not struct { predicate Predicate } -func (n not) InjectFunc(f inject.Func) error { - return f(n.predicate) -} - func (n not) 
Create(e event.CreateEvent) bool { return !n.predicate.Create(e) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go index 8285e2ca..4c8f8357 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -18,6 +18,7 @@ package reconcile import ( "context" + "errors" "time" "k8s.io/apimachinery/pkg/types" @@ -100,3 +101,30 @@ var _ Reconciler = Func(nil) // Reconcile implements Reconciler. func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } + +// TerminalError is an error that will not be retried but still be logged +// and recorded in metrics. +func TerminalError(wrapped error) error { + return &terminalError{err: wrapped} +} + +type terminalError struct { + err error +} + +// This function will return nil if te.err is nil. +func (te *terminalError) Unwrap() error { + return te.err +} + +func (te *terminalError) Error() string { + if te.err == nil { + return "nil terminal error" + } + return "terminal error: " + te.err.Error() +} + +func (te *terminalError) Is(target error) bool { + tp := &terminalError{} + return errors.As(target, &tp) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go deleted file mode 100644 index 17c60895..00000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package inject defines interfaces and functions for propagating dependencies from a ControllerManager to -the components registered with it. Dependencies are propagated to Reconciler, Source, EventHandler and Predicate -objects which implement the Injectable interfaces. -*/ -package inject diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go deleted file mode 100644 index c8c56ba8..00000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package inject is used by a Manager to inject types into Sources, EventHandlers, Predicates, and Reconciles. -// Deprecated: Use manager.Options fields directly. This package will be removed in v0.10. 
-package inject - -import ( - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and -// Reconciles. -type Cache interface { - InjectCache(cache cache.Cache) error -} - -// CacheInto will set informers on i and return the result if it implements Cache. Returns -// false if i does not implement Cache. -func CacheInto(c cache.Cache, i interface{}) (bool, error) { - if s, ok := i.(Cache); ok { - return true, s.InjectCache(c) - } - return false, nil -} - -// APIReader is used by the Manager to inject the APIReader into necessary types. -type APIReader interface { - InjectAPIReader(client.Reader) error -} - -// APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. -// Returns false if i does not implement APIReader. -func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { - if s, ok := i.(APIReader); ok { - return true, s.InjectAPIReader(reader) - } - return false, nil -} - -// Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and -// Reconciles. -type Config interface { - InjectConfig(*rest.Config) error -} - -// ConfigInto will set config on i and return the result if it implements Config. Returns -// false if i does not implement Config. -func ConfigInto(config *rest.Config, i interface{}) (bool, error) { - if s, ok := i.(Config); ok { - return true, s.InjectConfig(config) - } - return false, nil -} - -// Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and -// Reconciles. -type Client interface { - InjectClient(client.Client) error -} - -// ClientInto will set client on i and return the result if it implements Client. Returns -// false if i does not implement Client. -func ClientInto(client client.Client, i interface{}) (bool, error) { - if s, ok := i.(Client); ok { - return true, s.InjectClient(client) - } - return false, nil -} - -// Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and -// Reconciles. -type Scheme interface { - InjectScheme(scheme *runtime.Scheme) error -} - -// SchemeInto will set scheme and return the result on i if it implements Scheme. Returns -// false if i does not implement Scheme. -func SchemeInto(scheme *runtime.Scheme, i interface{}) (bool, error) { - if is, ok := i.(Scheme); ok { - return true, is.InjectScheme(scheme) - } - return false, nil -} - -// Stoppable is used by the ControllerManager to inject stop channel into Sources, -// EventHandlers, Predicates, and Reconciles. -type Stoppable interface { - InjectStopChannel(<-chan struct{}) error -} - -// StopChannelInto will set stop channel on i and return the result if it implements Stoppable. -// Returns false if i does not implement Stoppable. -func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { - if s, ok := i.(Stoppable); ok { - return true, s.InjectStopChannel(stop) - } - return false, nil -} - -// Mapper is used to inject the rest mapper to components that may need it. -type Mapper interface { - InjectMapper(meta.RESTMapper) error -} - -// MapperInto will set the rest mapper on i and return the result if it implements Mapper. -// Returns false if i does not implement Mapper. 
-func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { - if m, ok := i.(Mapper); ok { - return true, m.InjectMapper(mapper) - } - return false, nil -} - -// Func injects dependencies into i. -type Func func(i interface{}) error - -// Injector is used by the ControllerManager to inject Func into Controllers. -type Injector interface { - InjectFunc(f Func) error -} - -// InjectorInto will set f and return the result on i if it implements Injector. Returns -// false if i does not implement Injector. -func InjectorInto(f Func, i interface{}) (bool, error) { - if ii, ok := i.(Injector); ok { - return true, ii.InjectFunc(f) - } - return false, nil -} - -// Logger is used to inject Loggers into components that need them -// and don't otherwise have opinions. -type Logger interface { - InjectLogger(l logr.Logger) error -} - -// LoggerInto will set the logger on the given object if it implements inject.Logger, -// returning true if a InjectLogger was called, and false otherwise. -func LoggerInto(l logr.Logger, i interface{}) (bool, error) { - if injectable, wantsLogger := i.(Logger); wantsLogger { - return true, injectable.InjectLogger(l) - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index 6b675639..099c8d68 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -18,28 +18,19 @@ package source import ( "context" - "errors" "fmt" "sync" - "time" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - "sigs.k8s.io/controller-runtime/pkg/source/internal" + internal "sigs.k8s.io/controller-runtime/pkg/internal/source" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/predicate" ) -var log = logf.RuntimeLog.WithName("source") - const ( // defaultBufferSize is the default number of event notifications that can be buffered. defaultBufferSize = 1024 @@ -52,8 +43,7 @@ const ( // // * Use Channel for events originating outside the cluster (eh.g. GitHub Webhook callback, Polling external urls). // -// Users may build their own Source implementations. If their implementations implement any of the inject package -// interfaces, the dependencies will be injected by the Controller when Watch is called. +// Users may build their own Source implementations. type Source interface { // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. @@ -67,144 +57,9 @@ type SyncingSource interface { WaitForSync(ctx context.Context) error } -// NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used -// and not overwritten. It can be used to watch objects in a different cluster by passing the cache -// from that other cluster. 
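With the inject package removed above, controller-runtime no longer pushes caches, clients, schemes, or loggers into Sources, EventHandlers, Predicates, and Reconcilers; dependencies are handed over explicitly when those objects are constructed. A hedged sketch of the replacement pattern, assuming a reconciler that previously relied on InjectClient/InjectScheme (the PodLabelReconciler type is illustrative):

    package example

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/runtime"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // PodLabelReconciler receives its dependencies as plain fields instead of
    // implementing the removed inject.Client / inject.Scheme interfaces.
    type PodLabelReconciler struct {
        Client client.Client
        Scheme *runtime.Scheme
    }

    func (r *PodLabelReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
        return ctrl.Result{}, nil // real logic elided
    }

    func setup(mgr ctrl.Manager) error {
        r := &PodLabelReconciler{Client: mgr.GetClient(), Scheme: mgr.GetScheme()}
        return ctrl.NewControllerManagedBy(mgr).For(&corev1.Pod{}).Complete(r)
    }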
-func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource { - return &kindWithCache{kind: Kind{Type: object, cache: cache}} -} - -type kindWithCache struct { - kind Kind -} - -func (ks *kindWithCache) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - return ks.kind.Start(ctx, handler, queue, prct...) -} - -func (ks *kindWithCache) String() string { - return ks.kind.String() -} - -func (ks *kindWithCache) WaitForSync(ctx context.Context) error { - return ks.kind.WaitForSync(ctx) -} - -// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). -type Kind struct { - // Type is the type of object to watch. e.g. &v1.Pod{} - Type client.Object - - // cache used to watch APIs - cache cache.Cache - - // started may contain an error if one was encountered during startup. If its closed and does not - // contain an error, startup and syncing finished. - started chan error - startCancel func() -} - -var _ SyncingSource = &Kind{} - -// Start is internal and should be called only by the Controller to register an EventHandler with the Informer -// to enqueue reconcile.Requests. -func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - // Type should have been specified by the user. - if ks.Type == nil { - return fmt.Errorf("must specify Kind.Type") - } - - // cache should have been injected before Start was called - if ks.cache == nil { - return fmt.Errorf("must call CacheInto on Kind before calling Start") - } - - // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not - // sync that informer (most commonly due to RBAC issues). - ctx, ks.startCancel = context.WithCancel(ctx) - ks.started = make(chan error) - go func() { - var ( - i cache.Informer - lastErr error - ) - - // Tries to get an informer until it returns true, - // an error or the specified context is cancelled or expired. - if err := wait.PollImmediateUntilWithContext(ctx, 10*time.Second, func(ctx context.Context) (bool, error) { - // Lookup the Informer from the Cache and add an EventHandler which populates the Queue - i, lastErr = ks.cache.GetInformer(ctx, ks.Type) - if lastErr != nil { - kindMatchErr := &meta.NoKindMatchError{} - switch { - case errors.As(lastErr, &kindMatchErr): - log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", - "kind", kindMatchErr.GroupKind) - case runtime.IsNotRegisteredError(lastErr): - log.Error(lastErr, "kind must be registered to the Scheme") - default: - log.Error(lastErr, "failed to get informer from cache") - } - return false, nil // Retry. 
- } - return true, nil - }); err != nil { - if lastErr != nil { - ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) - return - } - ks.started <- err - return - } - - _, err := i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) - if err != nil { - ks.started <- err - return - } - if !ks.cache.WaitForCacheSync(ctx) { - // Would be great to return something more informative here - ks.started <- errors.New("cache did not sync") - } - close(ks.started) - }() - - return nil -} - -func (ks *Kind) String() string { - if ks.Type != nil { - return fmt.Sprintf("kind source: %T", ks.Type) - } - return "kind source: unknown type" -} - -// WaitForSync implements SyncingSource to allow controllers to wait with starting -// workers until the cache is synced. -func (ks *Kind) WaitForSync(ctx context.Context) error { - select { - case err := <-ks.started: - return err - case <-ctx.Done(): - ks.startCancel() - if errors.Is(ctx.Err(), context.Canceled) { - return nil - } - return errors.New("timed out waiting for cache to be synced") - } -} - -var _ inject.Cache = &Kind{} - -// InjectCache is internal should be called only by the Controller. InjectCache is used to inject -// the Cache dependency initialized by the ControllerManager. -func (ks *Kind) InjectCache(c cache.Cache) error { - if ks.cache == nil { - ks.cache = c - } - return nil +// Kind creates a KindSource with the given cache provider. +func Kind(cache cache.Cache, object client.Object) SyncingSource { + return &internal.Kind{Type: object, Cache: cache} } var _ Source = &Channel{} @@ -219,9 +74,6 @@ type Channel struct { // Source is the source channel to fetch GenericEvents Source <-chan event.GenericEvent - // stop is to end ongoing goroutine, and close the channels - stop <-chan struct{} - // dest is the destination channels of the added event handlers dest []chan event.GenericEvent @@ -237,18 +89,6 @@ func (cs *Channel) String() string { return fmt.Sprintf("channel source: %p", cs) } -var _ inject.Stoppable = &Channel{} - -// InjectStopChannel is internal should be called only by the Controller. -// It is used to inject the stop channel initialized by the ControllerManager. -func (cs *Channel) InjectStopChannel(stop <-chan struct{}) error { - if cs.stop == nil { - cs.stop = stop - } - - return nil -} - // Start implements Source and should only be called by the Controller. 
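The Kind struct and NewKindWithCache are replaced by the Kind constructor shown above, which takes the cache up front and therefore also covers the cross-cluster case NewKindWithCache existed for. A sketch of wiring a watch with it, assuming the Watch signature of this controller-runtime series (source, handler, optional predicates); the watched type and names are illustrative:

    package example

    import (
        corev1 "k8s.io/api/core/v1"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/handler"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
        "sigs.k8s.io/controller-runtime/pkg/source"
    )

    func addPodWatch(mgr ctrl.Manager, r reconcile.Reconciler) error {
        c, err := controller.New("pod-watcher", mgr, controller.Options{Reconciler: r})
        if err != nil {
            return err
        }
        // The cache is passed explicitly; a cache from another cluster could be
        // supplied here just as easily.
        return c.Watch(
            source.Kind(mgr.GetCache(), &corev1.Pod{}),
            &handler.EnqueueRequestForObject{},
        )
    }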
func (cs *Channel) Start( ctx context.Context, @@ -260,11 +100,6 @@ func (cs *Channel) Start( return fmt.Errorf("must specify Channel.Source") } - // stop should have been injected before Start was called - if cs.stop == nil { - return fmt.Errorf("must call InjectStop on Channel before calling Start") - } - // use default value if DestBufferSize not specified if cs.DestBufferSize == 0 { cs.DestBufferSize = defaultBufferSize @@ -292,7 +127,11 @@ func (cs *Channel) Start( } if shouldHandle { - handler.Generic(evt, queue) + func() { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + handler.Generic(ctx, evt, queue) + }() } } }() @@ -359,7 +198,7 @@ func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, que return fmt.Errorf("must specify Informer.Informer") } - _, err := is.Informer.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + _, err := is.Informer.AddEventHandler(internal.NewEventHandler(ctx, queue, handler, prct).HandlerFuncs()) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go index c7cb71b7..7e9c0a96 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go @@ -19,7 +19,6 @@ package admission import ( "fmt" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/json" @@ -32,8 +31,11 @@ type Decoder struct { } // NewDecoder creates a Decoder given the runtime.Scheme. -func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { - return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +func NewDecoder(scheme *runtime.Scheme) *Decoder { + if scheme == nil { + panic("scheme should never be nil") + } + return &Decoder{codecs: serializer.NewCodecFactory(scheme)} } // Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. @@ -62,9 +64,14 @@ func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) er if len(rawObj.Raw) == 0 { return fmt.Errorf("there is no content to decode") } - if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { + if unstructuredInto, isUnstructured := into.(runtime.Unstructured); isUnstructured { // unmarshal into unstructured's underlying object to avoid calling the decoder - return json.Unmarshal(rawObj.Raw, &unstructuredInto.Object) + var object map[string]interface{} + if err := json.Unmarshal(rawObj.Raw, &object); err != nil { + return err + } + unstructuredInto.SetUnstructuredContent(object) + return nil } deserializer := d.codecs.UniversalDeserializer() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go index e4e0778f..a3b72071 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go @@ -33,9 +33,9 @@ type Defaulter interface { } // DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. 
-func DefaultingWebhookFor(defaulter Defaulter) *Webhook { +func DefaultingWebhookFor(scheme *runtime.Scheme, defaulter Defaulter) *Webhook { return &Webhook{ - Handler: &mutatingHandler{defaulter: defaulter}, + Handler: &mutatingHandler{defaulter: defaulter, decoder: NewDecoder(scheme)}, } } @@ -44,16 +44,11 @@ type mutatingHandler struct { decoder *Decoder } -var _ DecoderInjector = &mutatingHandler{} - -// InjectDecoder injects the decoder into a mutatingHandler. -func (h *mutatingHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.defaulter == nil { panic("defaulter should never be nil") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go index 70079842..5f697e7d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go @@ -34,9 +34,9 @@ type CustomDefaulter interface { } // WithCustomDefaulter creates a new Webhook for a CustomDefaulter interface. -func WithCustomDefaulter(obj runtime.Object, defaulter CustomDefaulter) *Webhook { +func WithCustomDefaulter(scheme *runtime.Scheme, obj runtime.Object, defaulter CustomDefaulter) *Webhook { return &Webhook{ - Handler: &defaulterForType{object: obj, defaulter: defaulter}, + Handler: &defaulterForType{object: obj, defaulter: defaulter, decoder: NewDecoder(scheme)}, } } @@ -46,15 +46,11 @@ type defaulterForType struct { decoder *Decoder } -var _ DecoderInjector = &defaulterForType{} - -func (h *defaulterForType) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *defaulterForType) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.defaulter == nil { panic("defaulter should never be nil") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go index 0b274dd0..8dc0cbec 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go @@ -20,9 +20,3 @@ Package admission provides implementation for admission webhook and methods to i See examples/mutatingwebhook.go and examples/validatingwebhook.go for examples of admission webhooks. 
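The decoder and defaulting hunks above change together: admission.NewDecoder no longer returns an error (it panics on a nil scheme), and DefaultingWebhookFor / WithCustomDefaulter now take the scheme so each handler can build its own decoder instead of waiting for one to be injected. A hedged migration sketch; the path and the Defaulter implementation are illustrative:

    package example

    import (
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    // setupDefaultingWebhook registers a Defaulter-based webhook under the new API.
    func setupDefaultingWebhook(mgr ctrl.Manager, d admission.Defaulter) {
        // The scheme is passed to the constructor; the decoder is created
        // internally via admission.NewDecoder, which no longer returns an error.
        hook := admission.DefaultingWebhookFor(mgr.GetScheme(), d)
        mgr.GetWebhookServer().Register("/mutate-widget", hook)
    }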
*/ package admission - -import ( - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" -) - -var log = logf.RuntimeLog.WithName("admission") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index 066cc422..84ab5e75 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -52,7 +52,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { var reviewResponse Response if r.Body == nil { err = errors.New("request body is empty") - wh.log.Error(err, "bad request") + wh.getLogger(nil).Error(err, "bad request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -60,7 +60,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() if body, err = io.ReadAll(r.Body); err != nil { - wh.log.Error(err, "unable to read the body from the incoming request") + wh.getLogger(nil).Error(err, "unable to read the body from the incoming request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -69,7 +69,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { // verify the content type is accurate if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { err = fmt.Errorf("contentType=%s, expected application/json", contentType) - wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) + wh.getLogger(nil).Error(err, "unable to process a request with unknown content type") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -88,12 +88,12 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) if err != nil { - wh.log.Error(err, "unable to decode the request") + wh.getLogger(nil).Error(err, "unable to decode the request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return } - wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind, "resource", req.Resource) + wh.getLogger(&req).V(4).Info("received request") reviewResponse = wh.Handle(ctx, req) wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK) @@ -124,7 +124,7 @@ func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK // writeAdmissionResponse writes ar to w. func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { if err := json.NewEncoder(w).Encode(ar); err != nil { - wh.log.Error(err, "unable to encode and write the response") + wh.getLogger(nil).Error(err, "unable to encode and write the response") // Since the `ar v1.AdmissionReview` is a clear and legal object, // it should not have problem to be marshalled into bytes. // The error here is probably caused by the abnormal HTTP connection, @@ -132,15 +132,15 @@ func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { // to avoid endless circular calling. 
serverError := Errored(http.StatusInternalServerError, err) if err = json.NewEncoder(w).Encode(v1.AdmissionReview{Response: &serverError.AdmissionResponse}); err != nil { - wh.log.Error(err, "still unable to encode and write the InternalServerError response") + wh.getLogger(nil).Error(err, "still unable to encode and write the InternalServerError response") } } else { res := ar.Response - if log := wh.log; log.V(1).Enabled() { + if log := wh.getLogger(nil); log.V(4).Enabled() { if res.Result != nil { - log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason) + log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason, "message", res.Result.Message) } - log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed) + log.V(4).Info("wrote response", "requestID", res.UID, "allowed", res.Allowed) } } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go deleted file mode 100644 index d5af0d59..00000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -// DecoderInjector is used by the ControllerManager to inject decoder into webhook handlers. -type DecoderInjector interface { - InjectDecoder(*Decoder) error -} - -// InjectDecoderInto will set decoder on i and return the result if it implements Decoder. Returns -// false if i does not implement Decoder. -func InjectDecoderInto(decoder *Decoder, i interface{}) (bool, error) { - if s, ok := i.(DecoderInjector); ok { - return true, s.InjectDecoder(decoder) - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go index 26900cf2..2f7820d0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go @@ -25,8 +25,6 @@ import ( jsonpatch "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) type multiMutating []Handler @@ -62,31 +60,6 @@ func (hs multiMutating) Handle(ctx context.Context, req Request) Response { } } -// InjectFunc injects the field setter into the handlers. -func (hs multiMutating) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - for _, handler := range hs { - if err := f(handler); err != nil { - return err - } - } - - return nil -} - -// InjectDecoder injects the decoder into the handlers. 
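The handler logging above drops from V(1) to V(4) and is routed through getLogger, so the per-request "received request" / "wrote response" lines disappear at the verbosity many setups used before. A sketch of turning them back on with the zap helper, assuming the usual controller-runtime logging setup (logr V(4) maps to zap level -4):

    package main

    import (
        "go.uber.org/zap/zapcore"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/log/zap"
    )

    func init() {
        // Allow everything down to V(4) so the admission request/response logs
        // shown in this hunk are emitted again.
        ctrl.SetLogger(zap.New(zap.UseDevMode(true), zap.Level(zapcore.Level(-4))))
    }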
-func (hs multiMutating) InjectDecoder(d *Decoder) error { - for _, handler := range hs { - if _, err := InjectDecoderInto(d, handler); err != nil { - return err - } - } - return nil -} - // MultiMutatingHandler combines multiple mutating webhook handlers into a single // mutating webhook handler. Handlers are called in sequential order, and the first // `allowed: false` response may short-circuit the rest. Users must take care to @@ -120,28 +93,3 @@ func (hs multiValidating) Handle(ctx context.Context, req Request) Response { func MultiValidatingHandler(handlers ...Handler) Handler { return multiValidating(handlers) } - -// InjectFunc injects the field setter into the handlers. -func (hs multiValidating) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - for _, handler := range hs { - if err := f(handler); err != nil { - return err - } - } - - return nil -} - -// InjectDecoder injects the decoder into the handlers. -func (hs multiValidating) InjectDecoder(d *Decoder) error { - for _, handler := range hs { - if _, err := InjectDecoderInto(d, handler); err != nil { - return err - } - } - return nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go index 24ff1dee..ec1c88c9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go @@ -26,21 +26,21 @@ import ( // Allowed constructs a response indicating that the given operation // is allowed (without any patches). -func Allowed(reason string) Response { - return ValidationResponse(true, reason) +func Allowed(message string) Response { + return ValidationResponse(true, message) } // Denied constructs a response indicating that the given operation // is not allowed. -func Denied(reason string) Response { - return ValidationResponse(false, reason) +func Denied(message string) Response { + return ValidationResponse(false, message) } // Patched constructs a response indicating that the given operation is // allowed, and that the target object should be modified by the given // JSONPatch operations. -func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { - resp := Allowed(reason) +func Patched(message string, patches ...jsonpatch.JsonPatchOperation) Response { + resp := Allowed(message) resp.Patches = patches return resp @@ -60,21 +60,24 @@ func Errored(code int32, err error) Response { } // ValidationResponse returns a response for admitting a request. 
-func ValidationResponse(allowed bool, reason string) Response { +func ValidationResponse(allowed bool, message string) Response { code := http.StatusForbidden + reason := metav1.StatusReasonForbidden if allowed { code = http.StatusOK + reason = "" } resp := Response{ AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: allowed, Result: &metav1.Status{ - Code: int32(code), + Code: int32(code), + Reason: reason, }, }, } - if len(reason) > 0 { - resp.Result.Reason = metav1.StatusReason(reason) + if len(message) > 0 { + resp.Result.Message = message } return resp } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go index 4b27e75e..00bda8a4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -18,7 +18,8 @@ package admission import ( "context" - goerrors "errors" + "errors" + "fmt" "net/http" v1 "k8s.io/api/admission/v1" @@ -26,18 +27,35 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// Warnings represents warning messages. +type Warnings []string + // Validator defines functions for validating an operation. +// The custom resource kind which implements this interface can validate itself. +// To validate the custom resource with another specific struct, use CustomValidator instead. type Validator interface { runtime.Object - ValidateCreate() error - ValidateUpdate(old runtime.Object) error - ValidateDelete() error + + // ValidateCreate validates the object on creation. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateCreate() (warnings Warnings, err error) + + // ValidateUpdate validates the object on update. The oldObj is the object before the update. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateUpdate(old runtime.Object) (warnings Warnings, err error) + + // ValidateDelete validates the object on deletion. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateDelete() (warnings Warnings, err error) } // ValidatingWebhookFor creates a new Webhook for validating the provided type. -func ValidatingWebhookFor(validator Validator) *Webhook { +func ValidatingWebhookFor(scheme *runtime.Scheme, validator Validator) *Webhook { return &Webhook{ - Handler: &validatingHandler{validator: validator}, + Handler: &validatingHandler{validator: validator, decoder: NewDecoder(scheme)}, } } @@ -46,42 +64,34 @@ type validatingHandler struct { decoder *Decoder } -var _ DecoderInjector = &validatingHandler{} - -// InjectDecoder injects the decoder into a validatingHandler. -func (h *validatingHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. 
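In the response helpers above, the string argument is now a human-readable message rather than a status reason: it is stored in Result.Message, while Result.Reason becomes metav1.StatusReasonForbidden for denials and stays empty for allows. A small sketch of the new semantics; the validation text and warning are illustrative:

    package example

    import "sigs.k8s.io/controller-runtime/pkg/webhook/admission"

    func exampleResponses() (admission.Response, admission.Response) {
        // Denied now yields Allowed=false, Code=403, Reason=Forbidden and puts
        // the text into Result.Message rather than Result.Reason.
        denied := admission.Denied("spec.replicas must not be negative")

        // Allowed("") stays a plain 200; warnings can be attached to either kind
        // of response via WithWarnings.
        allowed := admission.Allowed("").WithWarnings("spec.legacyMode is deprecated")
        return denied, allowed
    }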
func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.validator == nil { panic("validator should never be nil") } - // Get the object in the request obj := h.validator.DeepCopyObject().(Validator) - if req.Operation == v1.Create { - err := h.decoder.Decode(req, obj) - if err != nil { - return Errored(http.StatusBadRequest, err) - } - err = obj.ValidateCreate() - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) + var err error + var warnings []string + + switch req.Operation { + case v1.Connect: + // No validation for connect requests. + // TODO(vincepri): Should we validate CONNECT requests? In what cases? + case v1.Create: + if err = h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) } - } - if req.Operation == v1.Update { + warnings, err = obj.ValidateCreate() + case v1.Update: oldObj := obj.DeepCopyObject() - err := h.decoder.DecodeRaw(req.Object, obj) + err = h.decoder.DecodeRaw(req.Object, obj) if err != nil { return Errored(http.StatusBadRequest, err) } @@ -90,33 +100,26 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = obj.ValidateUpdate(oldObj) - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) - } - } - - if req.Operation == v1.Delete { + warnings, err = obj.ValidateUpdate(oldObj) + case v1.Delete: // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 // OldObject contains the object being deleted - err := h.decoder.DecodeRaw(req.OldObject, obj) + err = h.decoder.DecodeRaw(req.OldObject, obj) if err != nil { return Errored(http.StatusBadRequest, err) } - err = obj.ValidateDelete() - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) - } + warnings, err = obj.ValidateDelete() + default: + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation %q", req.Operation)) } - return Allowed("") + if err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()).WithWarnings(warnings...) + } + return Denied(err.Error()).WithWarnings(warnings...) + } + return Allowed("").WithWarnings(warnings...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go index 33252f11..e99fbd8a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go @@ -28,16 +28,29 @@ import ( ) // CustomValidator defines functions for validating an operation. +// The object to be validated is passed into methods as a parameter. type CustomValidator interface { - ValidateCreate(ctx context.Context, obj runtime.Object) error - ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error - ValidateDelete(ctx context.Context, obj runtime.Object) error + + // ValidateCreate validates the object on creation. 
+ // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateCreate(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) + + // ValidateUpdate validates the object on update. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings Warnings, err error) + + // ValidateDelete validates the object on deletion. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateDelete(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) } // WithCustomValidator creates a new Webhook for validating the provided type. -func WithCustomValidator(obj runtime.Object, validator CustomValidator) *Webhook { +func WithCustomValidator(scheme *runtime.Scheme, obj runtime.Object, validator CustomValidator) *Webhook { return &Webhook{ - Handler: &validatorForType{object: obj, validator: validator}, + Handler: &validatorForType{object: obj, validator: validator, decoder: NewDecoder(scheme)}, } } @@ -47,16 +60,11 @@ type validatorForType struct { decoder *Decoder } -var _ DecoderInjector = &validatorForType{} - -// InjectDecoder injects the decoder into a validatingHandler. -func (h *validatorForType) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *validatorForType) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.validator == nil { panic("validator should never be nil") } @@ -70,13 +78,18 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { obj := h.object.DeepCopyObject() var err error + var warnings []string + switch req.Operation { + case v1.Connect: + // No validation for connect requests. + // TODO(vincepri): Should we validate CONNECT requests? In what cases? case v1.Create: if err := h.decoder.Decode(req, obj); err != nil { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateCreate(ctx, obj) + warnings, err = h.validator.ValidateCreate(ctx, obj) case v1.Update: oldObj := obj.DeepCopyObject() if err := h.decoder.DecodeRaw(req.Object, obj); err != nil { @@ -86,7 +99,7 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateUpdate(ctx, oldObj, obj) + warnings, err = h.validator.ValidateUpdate(ctx, oldObj, obj) case v1.Delete: // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 // OldObject contains the object being deleted @@ -94,20 +107,20 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateDelete(ctx, obj) + warnings, err = h.validator.ValidateDelete(ctx, obj) default: - return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation request %q", req.Operation)) + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation %q", req.Operation)) } // Check the error message first. if err != nil { var apiStatus apierrors.APIStatus if errors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) + return validationResponseFromStatus(false, apiStatus.Status()).WithWarnings(warnings...) 
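Both validator interfaces in these hunks now return admission.Warnings alongside the error, and the handlers attach those warnings to the response whether the request is allowed or denied. A hedged sketch of one CustomValidator method under the new signature (only ValidateCreate is shown; the checks are illustrative):

    package example

    import (
        "context"
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"
        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    type widgetValidator struct{}

    // ValidateCreate returns warnings that are surfaced to the client even when
    // the object is accepted; a non-nil error denies the request.
    func (widgetValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
        warnings := admission.Warnings{"spec.legacyMode is deprecated and will be removed"}
        if obj == nil {
            return warnings, fmt.Errorf("object must not be nil")
        }
        return warnings, nil
    }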
} - return Denied(err.Error()) + return Denied(err.Error()).WithWarnings(warnings...) } // Return allowed if everything succeeded. - return Allowed("") + return Allowed("").WithWarnings(warnings...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go index d10b97dd..f1767f31 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -21,18 +21,17 @@ import ( "errors" "fmt" "net/http" + "sync" "github.com/go-logr/logr" - jsonpatch "gomodules.xyz/jsonpatch/v2" + "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog/v2" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) @@ -131,16 +130,14 @@ type Webhook struct { // headers thus allowing you to read them from within the handler WithContextFunc func(context.Context, *http.Request) context.Context - // decoder is constructed on receiving a scheme and passed down to then handler - decoder *Decoder + // LogConstructor is used to construct a logger for logging messages during webhook calls + // based on the given base logger (which might carry more values like the webhook's path). + // Note: LogConstructor has to be able to handle nil requests as we are also using it + // outside the context of requests. + LogConstructor func(base logr.Logger, req *Request) logr.Logger - log logr.Logger -} - -// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook. -func (wh *Webhook) InjectLogger(l logr.Logger) error { - wh.log = l - return nil + setupLogOnce sync.Once + log logr.Logger } // WithRecoverPanic takes a bool flag which indicates whether the panic caused by webhook should be recovered. @@ -166,79 +163,47 @@ func (wh *Webhook) Handle(ctx context.Context, req Request) (response Response) }() } + reqLog := wh.getLogger(&req) + ctx = logf.IntoContext(ctx, reqLog) + resp := wh.Handler.Handle(ctx, req) if err := resp.Complete(req); err != nil { - wh.log.Error(err, "unable to encode response") + reqLog.Error(err, "unable to encode response") return Errored(http.StatusInternalServerError, errUnableToEncodeResponse) } return resp } -// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. -func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { - // TODO(directxman12): we should have a better way to pass this down - - var err error - wh.decoder, err = NewDecoder(s) - if err != nil { - return err - } - - // inject the decoder here too, just in case the order of calling this is not - // scheme first, then inject func - if wh.Handler != nil { - if _, err := InjectDecoderInto(wh.GetDecoder(), wh.Handler); err != nil { - return err +// getLogger constructs a logger from the injected log and LogConstructor. 
+func (wh *Webhook) getLogger(req *Request) logr.Logger { + wh.setupLogOnce.Do(func() { + if wh.log.GetSink() == nil { + wh.log = logf.Log.WithName("admission") } - } - - return nil -} + }) -// GetDecoder returns a decoder to decode the objects embedded in admission requests. -// It may be nil if we haven't received a scheme to use to determine object types yet. -func (wh *Webhook) GetDecoder() *Decoder { - return wh.decoder + logConstructor := wh.LogConstructor + if logConstructor == nil { + logConstructor = DefaultLogConstructor + } + return logConstructor(wh.log, req) } -// InjectFunc injects the field setter into the webhook. -func (wh *Webhook) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - - // also inject a decoder, and wrap this so that we get a setFields - // that injects a decoder (hopefully things don't ignore the duplicate - // InjectorInto call). - - var setFields inject.Func - setFields = func(target interface{}) error { - if err := f(target); err != nil { - return err - } - - if _, err := inject.InjectorInto(setFields, target); err != nil { - return err - } - - if _, err := InjectDecoderInto(wh.GetDecoder(), target); err != nil { - return err - } - - return nil +// DefaultLogConstructor adds some commonly interesting fields to the given logger. +func DefaultLogConstructor(base logr.Logger, req *Request) logr.Logger { + if req != nil { + return base.WithValues("object", klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + "resource", req.Resource, "user", req.UserInfo.Username, + "requestID", req.UID, + ) } - - return setFields(wh.Handler) + return base } // StandaloneOptions let you configure a StandaloneWebhook. type StandaloneOptions struct { - // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources - // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better - // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. - Scheme *runtime.Scheme // Logger to be used by the webhook. // If none is set, it defaults to log.Log global logger. Logger logr.Logger @@ -258,19 +223,9 @@ type StandaloneOptions struct { // in your own server/mux. In order to be accessed by a kubernetes cluster, // all webhook servers require TLS. func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, error) { - if opts.Scheme == nil { - opts.Scheme = scheme.Scheme - } - - if err := hook.InjectScheme(opts.Scheme); err != nil { - return nil, err + if opts.Logger.GetSink() != nil { + hook.log = opts.Logger } - - if opts.Logger.GetSink() == nil { - opts.Logger = logf.RuntimeLog.WithName("webhook") - } - hook.log = opts.Logger - if opts.MetricsPath == "" { return hook, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go index 879aae3c..249a364b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go @@ -39,28 +39,20 @@ var ( log = logf.Log.WithName("conversion-webhook") ) -// Webhook implements a CRD conversion webhook HTTP handler. 
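With InjectLogger gone, the webhook derives its logger lazily in getLogger and exposes the new LogConstructor field for callers who want extra context on every log line. A sketch of overriding it; the static "webhook" value is illustrative, and the constructor must tolerate a nil request because it is also called outside request handling:

    package example

    import (
        "github.com/go-logr/logr"
        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    func withRequestLogging(hook *admission.Webhook) *admission.Webhook {
        hook.LogConstructor = func(base logr.Logger, req *admission.Request) logr.Logger {
            l := base.WithValues("webhook", "widget-validator")
            if req != nil {
                l = l.WithValues("operation", req.Operation, "uid", req.UID)
            }
            return l
        }
        return hook
    }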
-type Webhook struct { - scheme *runtime.Scheme - decoder *Decoder +func NewWebhookHandler(scheme *runtime.Scheme) http.Handler { + return &webhook{scheme: scheme, decoder: NewDecoder(scheme)} } -// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. -func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { - var err error - wh.scheme = s - wh.decoder, err = NewDecoder(s) - if err != nil { - return err - } - - return nil +// webhook implements a CRD conversion webhook HTTP handler. +type webhook struct { + scheme *runtime.Scheme + decoder *Decoder } // ensure Webhook implements http.Handler -var _ http.Handler = &Webhook{} +var _ http.Handler = &webhook{} -func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (wh *webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { convertReview := &apix.ConversionReview{} err := json.NewDecoder(r.Body).Decode(convertReview) if err != nil { @@ -95,7 +87,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // handles a version conversion request. -func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) { +func (wh *webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) { if req == nil { return nil, fmt.Errorf("conversion request is nil") } @@ -128,7 +120,7 @@ func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.Conv // convertObject will convert given a src object to dst object. // Note(droot): couldn't find a way to reduce the cyclomatic complexity under 10 // without compromising readability, so disabling gocyclo linter -func (wh *Webhook) convertObject(src, dst runtime.Object) error { +func (wh *webhook) convertObject(src, dst runtime.Object) error { srcGVK := src.GetObjectKind().GroupVersionKind() dstGVK := dst.GetObjectKind().GroupVersionKind() @@ -155,7 +147,7 @@ func (wh *Webhook) convertObject(src, dst runtime.Object) error { } } -func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { +func (wh *webhook) convertViaHub(src, dst conversion.Convertible) error { hub, err := wh.getHub(src) if err != nil { return err @@ -179,7 +171,7 @@ func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { } // getHub returns an instance of the Hub for passed-in object's group/kind. -func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { +func (wh *webhook) getHub(obj runtime.Object) (conversion.Hub, error) { gvks, err := objectGVKs(wh.scheme, obj) if err != nil { return nil, err @@ -207,7 +199,7 @@ func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { } // allocateDstObject returns an instance for a given GVK. 
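The conversion webhook above loses its exported struct and InjectScheme; NewWebhookHandler(scheme) now returns a ready-to-serve http.Handler. A hedged sketch of registering it by hand, assuming your setup wires the /convert endpoint itself rather than relying on the manager or builder to do so:

    package example

    import (
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/webhook/conversion"
    )

    func registerConversion(mgr ctrl.Manager) {
        // The handler is fully constructed here; there is no scheme to inject later.
        mgr.GetWebhookServer().Register("/convert", conversion.NewWebhookHandler(mgr.GetScheme()))
    }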
-func (wh *Webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { +func (wh *webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { gvk := schema.FromAPIVersionAndKind(apiVersion, kind) obj, err := wh.scheme.New(gvk) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go index 6a9e9c23..b6bb8bd9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go @@ -30,8 +30,11 @@ type Decoder struct { } // NewDecoder creates a Decoder given the runtime.Scheme -func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { - return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +func NewDecoder(scheme *runtime.Scheme) *Decoder { + if scheme == nil { + panic("scheme should never be nil") + } + return &Decoder{codecs: serializer.NewCodecFactory(scheme)} } // Decode decodes the inlined object. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index 99c86326..23d5bf43 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -29,12 +29,9 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/runtime" - kscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/certwatcher" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) @@ -49,7 +46,29 @@ var DefaultPort = 9443 // at the default locations (tls.crt and tls.key). If you do not // want to configure TLS (i.e for testing purposes) run an // admission.StandaloneWebhook in your own server. -type Server struct { +type Server interface { + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates + // the webhook server doesn't need leader election. + NeedLeaderElection() bool + + // Register marks the given webhook as being served at the given path. + // It panics if two hooks are registered on the same path. + Register(path string, hook http.Handler) + + // Start runs the server. + // It will install the webhook related resources depend on the server configuration. + Start(ctx context.Context) error + + // StartedChecker returns an healthz.Checker which is healthy after the + // server has been started. + StartedChecker() healthz.Checker + + // WebhookMux returns the servers WebhookMux + WebhookMux() *http.ServeMux +} + +// Options are all the available options for a webhook.Server +type Options struct { // Host is the address that the server will listen on. // Defaults to "" - all addresses. Host string @@ -63,9 +82,13 @@ type Server struct { CertDir string // CertName is the server certificate name. Defaults to tls.crt. + // + // Note: This option should only be set when TLSOpts does not override GetCertificate. CertName string // KeyName is the server key name. Defaults to tls.key. + // + // Note: This option should only be set when TLSOpts does not override GetCertificate. KeyName string // ClientCAName is the CA certificate name which server used to verify remote(client)'s certificate. @@ -82,13 +105,21 @@ type Server struct { // WebhookMux is the multiplexer that handles different webhooks. 
WebhookMux *http.ServeMux +} - // webhooks keep track of all registered webhooks for dependency injection, - // and to provide better panic messages on duplicate webhook registration. - webhooks map[string]http.Handler +// NewServer constructs a new Server from the provided options. +func NewServer(o Options) Server { + return &DefaultServer{ + Options: o, + } +} - // setFields allows injecting dependencies from an external source - setFields inject.Func +// DefaultServer is the default implementation used for Server. +type DefaultServer struct { + Options Options + + // webhooks keep track of all registered webhooks + webhooks map[string]http.Handler // defaultingOnce ensures that the default fields are only ever set once. defaultingOnce sync.Once @@ -99,41 +130,49 @@ type Server struct { // mu protects access to the webhook map & setFields for Start, Register, etc mu sync.Mutex + + webhookMux *http.ServeMux } // setDefaults does defaulting for the Server. -func (s *Server) setDefaults() { - s.webhooks = map[string]http.Handler{} - if s.WebhookMux == nil { - s.WebhookMux = http.NewServeMux() +func (o *Options) setDefaults() { + if o.WebhookMux == nil { + o.WebhookMux = http.NewServeMux() } - if s.Port <= 0 { - s.Port = DefaultPort + if o.Port <= 0 { + o.Port = DefaultPort } - if len(s.CertDir) == 0 { - s.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") + if len(o.CertDir) == 0 { + o.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") } - if len(s.CertName) == 0 { - s.CertName = "tls.crt" + if len(o.CertName) == 0 { + o.CertName = "tls.crt" } - if len(s.KeyName) == 0 { - s.KeyName = "tls.key" + if len(o.KeyName) == 0 { + o.KeyName = "tls.key" } } +func (s *DefaultServer) setDefaults() { + s.webhooks = map[string]http.Handler{} + s.Options.setDefaults() + + s.webhookMux = s.Options.WebhookMux +} + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates // the webhook server doesn't need leader election. -func (*Server) NeedLeaderElection() bool { +func (*DefaultServer) NeedLeaderElection() bool { return false } // Register marks the given webhook as being served at the given path. // It panics if two hooks are registered on the same path. -func (s *Server) Register(path string, hook http.Handler) { +func (s *DefaultServer) Register(path string, hook http.Handler) { s.mu.Lock() defer s.mu.Unlock() @@ -141,51 +180,11 @@ func (s *Server) Register(path string, hook http.Handler) { if _, found := s.webhooks[path]; found { panic(fmt.Errorf("can't register duplicate path: %v", path)) } - // TODO(directxman12): call setfields if we've already started the server s.webhooks[path] = hook - s.WebhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) + s.webhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) regLog := log.WithValues("path", path) regLog.Info("Registering webhook") - - // we've already been "started", inject dependencies here. - // Otherwise, InjectFunc will do this for us later. - if s.setFields != nil { - if err := s.setFields(hook); err != nil { - // TODO(directxman12): swallowing this error isn't great, but we'd have to - // change the signature to fix that - regLog.Error(err, "unable to inject fields into webhook during registration") - } - - baseHookLog := log.WithName("webhooks") - - // NB(directxman12): we don't propagate this further by wrapping setFields because it's - // unclear if this is how we want to deal with log propagation. 
In this specific instance, - // we want to be able to pass a logger to webhooks because they don't know their own path. - if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", path), hook); err != nil { - regLog.Error(err, "unable to logger into webhook during registration") - } - } -} - -// StartStandalone runs a webhook server without -// a controller manager. -func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) error { - // Use the Kubernetes client-go scheme if none is specified - if scheme == nil { - scheme = kscheme.Scheme - } - - if err := s.InjectFunc(func(i interface{}) error { - if _, err := inject.SchemeInto(scheme, i); err != nil { - return err - } - return nil - }); err != nil { - return err - } - - return s.Start(ctx) } // tlsVersion converts from human-readable TLS version (for example "1.1") @@ -210,41 +209,49 @@ func tlsVersion(version string) (uint16, error) { // Start runs the server. // It will install the webhook related resources depend on the server configuration. -func (s *Server) Start(ctx context.Context) error { +func (s *DefaultServer) Start(ctx context.Context) error { s.defaultingOnce.Do(s.setDefaults) baseHookLog := log.WithName("webhooks") baseHookLog.Info("Starting webhook server") - certPath := filepath.Join(s.CertDir, s.CertName) - keyPath := filepath.Join(s.CertDir, s.KeyName) - - certWatcher, err := certwatcher.New(certPath, keyPath) + tlsMinVersion, err := tlsVersion(s.Options.TLSMinVersion) if err != nil { return err } - go func() { - if err := certWatcher.Start(ctx); err != nil { - log.Error(err, "certificate watcher error") - } - }() - - tlsMinVersion, err := tlsVersion(s.TLSMinVersion) - if err != nil { - return err + cfg := &tls.Config{ //nolint:gosec + NextProtos: []string{"h2"}, + MinVersion: tlsMinVersion, + } + // fallback TLS config ready, will now mutate if passer wants full control over it + for _, op := range s.Options.TLSOpts { + op(cfg) } - cfg := &tls.Config{ //nolint:gosec - NextProtos: []string{"h2"}, - GetCertificate: certWatcher.GetCertificate, - MinVersion: tlsMinVersion, + if cfg.GetCertificate == nil { + certPath := filepath.Join(s.Options.CertDir, s.Options.CertName) + keyPath := filepath.Join(s.Options.CertDir, s.Options.KeyName) + + // Create the certificate watcher and + // set the config's GetCertificate on the TLSConfig + certWatcher, err := certwatcher.New(certPath, keyPath) + if err != nil { + return err + } + cfg.GetCertificate = certWatcher.GetCertificate + + go func() { + if err := certWatcher.Start(ctx); err != nil { + log.Error(err, "certificate watcher error") + } + }() } - // load CA to verify client certificate - if s.ClientCAName != "" { + // Load CA to verify client certificate, if configured. 
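webhook.Server becomes an interface backed by DefaultServer, so servers are now built from an Options struct via NewServer instead of by filling the struct directly, and Start honours a GetCertificate supplied through TLSOpts before falling back to the certificate watcher. A hedged construction sketch with illustrative values:

    package example

    import (
        "crypto/tls"

        "sigs.k8s.io/controller-runtime/pkg/webhook"
    )

    func newWebhookServer() webhook.Server {
        // All configuration moves into webhook.Options; the returned value
        // satisfies the new interface (Register, Start, StartedChecker, WebhookMux).
        return webhook.NewServer(webhook.Options{
            Port:    9443,
            CertDir: "/tmp/k8s-webhook-server/serving-certs", // illustrative path
            TLSOpts: []func(*tls.Config){
                func(c *tls.Config) { c.MinVersion = tls.VersionTLS12 },
            },
        })
    }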
+ if s.Options.ClientCAName != "" { certPool := x509.NewCertPool() - clientCABytes, err := os.ReadFile(filepath.Join(s.CertDir, s.ClientCAName)) + clientCABytes, err := os.ReadFile(filepath.Join(s.Options.CertDir, s.Options.ClientCAName)) if err != nil { return fmt.Errorf("failed to read client CA cert: %w", err) } @@ -258,27 +265,23 @@ func (s *Server) Start(ctx context.Context) error { cfg.ClientAuth = tls.RequireAndVerifyClientCert } - // fallback TLS config ready, will now mutate if passer wants full control over it - for _, op := range s.TLSOpts { - op(cfg) - } - - listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg) + listener, err := tls.Listen("tcp", net.JoinHostPort(s.Options.Host, strconv.Itoa(s.Options.Port)), cfg) if err != nil { return err } - log.Info("Serving webhook server", "host", s.Host, "port", s.Port) + log.Info("Serving webhook server", "host", s.Options.Host, "port", s.Options.Port) - srv := httpserver.New(s.WebhookMux) + srv := httpserver.New(s.webhookMux) idleConnsClosed := make(chan struct{}) go func() { <-ctx.Done() - log.Info("shutting down webhook server") + log.Info("Shutting down webhook server with timeout of 1 minute") - // TODO: use a context with reasonable timeout - if err := srv.Shutdown(context.Background()); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + if err := srv.Shutdown(ctx); err != nil { // Error from closing listeners, or context timeout log.Error(err, "error shutting down the HTTP server") } @@ -298,7 +301,7 @@ func (s *Server) Start(ctx context.Context) error { // StartedChecker returns an healthz.Checker which is healthy after the // server has been started. -func (s *Server) StartedChecker() healthz.Checker { +func (s *DefaultServer) StartedChecker() healthz.Checker { config := &tls.Config{ InsecureSkipVerify: true, //nolint:gosec // config is used to connect to our own webhook port. } @@ -311,7 +314,7 @@ func (s *Server) StartedChecker() healthz.Checker { } d := &net.Dialer{Timeout: 10 * time.Second} - conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config) + conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Options.Host, strconv.Itoa(s.Options.Port)), config) if err != nil { return fmt.Errorf("webhook server is not reachable: %w", err) } @@ -324,23 +327,7 @@ func (s *Server) StartedChecker() healthz.Checker { } } -// InjectFunc injects the field setter into the server. -func (s *Server) InjectFunc(f inject.Func) error { - s.setFields = f - - // inject fields here that weren't injected in Register because we didn't have setFields yet. - baseHookLog := log.WithName("webhooks") - for hookPath, webhook := range s.webhooks { - if err := s.setFields(webhook); err != nil { - return err - } - - // NB(directxman12): we don't propagate this further by wrapping setFields because it's - // unclear if this is how we want to deal with log propagation. In this specific instance, - // we want to be able to pass a logger to webhooks because they don't know their own path. 
- if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil { - return err - } - } - return nil +// WebhookMux returns the servers WebhookMux +func (s *DefaultServer) WebhookMux() *http.ServeMux { + return s.webhookMux } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go new file mode 100644 index 00000000..75a492d8 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + "sort" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Conflict is a conflict on a specific field with the current manager of +// that field. It does implement the error interface so that it can be +// used as an error. +type Conflict struct { + Manager string + Path fieldpath.Path +} + +// Conflict is an error. +var _ error = Conflict{} + +// Error formats the conflict as an error. +func (c Conflict) Error() string { + return fmt.Sprintf("conflict with %q: %v", c.Manager, c.Path) +} + +// Equals returns true if c == c2 +func (c Conflict) Equals(c2 Conflict) bool { + if c.Manager != c2.Manager { + return false + } + return c.Path.Equals(c2.Path) +} + +// Conflicts accumulates multiple conflicts and aggregates them by managers. +type Conflicts []Conflict + +var _ error = Conflicts{} + +// Error prints the list of conflicts, grouped by sorted managers. +func (conflicts Conflicts) Error() string { + if len(conflicts) == 1 { + return conflicts[0].Error() + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + managers := []string{} + for manager := range m { + managers = append(managers, manager) + } + + // Print conflicts by sorted managers. + sort.Strings(managers) + + messages := []string{} + for _, manager := range managers { + messages = append(messages, fmt.Sprintf("conflicts with %q:", manager)) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return strings.Join(messages, "\n") +} + +// Equals returns true if the lists of conflicts are the same. +func (c Conflicts) Equals(c2 Conflicts) bool { + if len(c) != len(c2) { + return false + } + for i := range c { + if !c[i].Equals(c2[i]) { + return false + } + } + return true +} + +// ToSet aggregates conflicts for all managers into a single Set. +func (c Conflicts) ToSet() *fieldpath.Set { + set := fieldpath.NewSet() + for _, conflict := range []Conflict(c) { + set.Insert(conflict.Path) + } + return set +} + +// ConflictsFromManagers creates a list of conflicts given Managers sets. 
+func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts { + conflicts := []Conflict{} + + for manager, set := range sets { + set.Set().Iterate(func(p fieldpath.Path) { + conflicts = append(conflicts, Conflict{ + Manager: manager, + Path: p, + }) + }) + } + + return conflicts +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go new file mode 100644 index 00000000..1b23dcbd --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -0,0 +1,356 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// Converter is an interface to the conversion logic. The converter +// needs to be able to convert objects from one version to another. +type Converter interface { + Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) + IsMissingVersionError(error) bool +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + Converter Converter + IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + enableUnions bool +} + +// EnableUnionFeature turns on union handling. It is disabled by default until the +// feature is complete. 
+func (s *Updater) EnableUnionFeature() { + s.enableUnions = true +} + +func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { + conflicts := fieldpath.ManagedFields{} + removed := fieldpath.ManagedFields{} + compare, err := oldObject.Compare(newObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + + versions := map[fieldpath.APIVersion]*typed.Comparison{ + version: compare.ExcludeFields(s.IgnoredFields[version]), + } + + for manager, managerSet := range managers { + if manager == workflow { + continue + } + compare, ok := versions[managerSet.APIVersion()] + if !ok { + var err error + versionedOldObject, err := s.Converter.Convert(oldObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert old object: %v", err) + } + versionedNewObject, err := s.Converter.Convert(newObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert new object: %v", err) + } + compare, err = versionedOldObject.Compare(versionedNewObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()]) + } + + conflictSet := managerSet.Set().Intersection(compare.Modified.Union(compare.Added)) + if !conflictSet.Empty() { + conflicts[manager] = fieldpath.NewVersionedSet(conflictSet, managerSet.APIVersion(), false) + } + + if !compare.Removed.Empty() { + removed[manager] = fieldpath.NewVersionedSet(compare.Removed, managerSet.APIVersion(), false) + } + } + + if !force && len(conflicts) != 0 { + return nil, nil, ConflictsFromManagers(conflicts) + } + + for manager, conflictSet := range conflicts { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(conflictSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager, removedSet := range removed { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(removedSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager := range managers { + if managers[manager].Set().Empty() { + delete(managers, manager) + } + } + + return managers, compare, nil +} + +// Update is the method you should call once you've merged your final +// object on CREATE/UPDATE/PATCH verbs. newObject must be the object +// that you intend to persist (after applying the patch if this is for a +// PATCH call), and liveObject must be the original object (empty if +// this is a CREATE call). 
+func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if s.enableUnions { + newObject, err = liveObject.NormalizeUnions(newObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if _, ok := managers[manager]; !ok { + managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false) + } + + ignored := s.IgnoredFields[version] + if ignored == nil { + ignored = fieldpath.NewSet() + } + managers[manager] = fieldpath.NewVersionedSet( + managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + version, + false, + ) + if managers[manager].Set().Empty() { + delete(managers, manager) + } + return newObject, managers, nil +} + +// Apply should be called when Apply is run, given the current object as +// well as the configuration that is applied. This will merge the object +// and return it. If the object hasn't changed, nil is returned (the +// managers can still have changed though). +func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if s.enableUnions { + configObject, err = configObject.NormalizeUnionsApply(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + newObject, err := liveObject.Merge(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) + } + if s.enableUnions { + newObject, err = configObject.NormalizeUnionsApply(newObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + lastSet := managers[manager] + set, err := configObject.ToFieldSet() + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to get field set: %v", err) + } + + ignored := s.IgnoredFields[version] + if ignored != nil { + set = set.RecursiveDifference(ignored) + // TODO: is this correct. If we don't remove from lastSet pruning might remove the fields? 
+ if lastSet != nil { + lastSet.Set().RecursiveDifference(ignored) + } + } + managers[manager] = fieldpath.NewVersionedSet(set, version, true) + newObject, err = s.prune(newObject, managers, manager, lastSet) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if compare.IsSame() { + newObject = nil + } + return newObject, managers, nil +} + +// prune will remove a field, list or map item, iff: +// * applyingManager applied it last time +// * applyingManager didn't apply it this time +// * no other applier claims to manage it +func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFields, applyingManager string, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + if lastSet == nil || lastSet.Set().Empty() { + return merged, nil + } + convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert merged object to last applied version: %v", err) + } + + sc, tr := convertedMerged.Schema(), convertedMerged.TypeRef() + pruned := convertedMerged.RemoveItems(lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr)) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager) + if err != nil { + return nil, fmt.Errorf("failed add back owned items: %v", err) + } + pruned, err = s.addBackDanglingItems(convertedMerged, pruned, lastSet) + if err != nil { + return nil, fmt.Errorf("failed add back dangling items: %v", err) + } + return s.Converter.Convert(pruned, managers[applyingManager].APIVersion()) +} + +// addBackOwnedItems adds back any fields, list and map items that were removed by prune, +// but other appliers or updaters (or the current applier's new config) claim to own. +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { + var err error + managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} + for _, managerSet := range managedFields { + if _, ok := managedAtVersion[managerSet.APIVersion()]; !ok { + managedAtVersion[managerSet.APIVersion()] = fieldpath.NewSet() + } + managedAtVersion[managerSet.APIVersion()] = managedAtVersion[managerSet.APIVersion()].Union(managerSet.Set()) + } + // Add back owned items at pruned version first to avoid conversion failure + // caused by pruned fields which are required for conversion. + prunedVersion := fieldpath.APIVersion(*pruned.TypeRef().NamedType) + if managed, ok := managedAtVersion[prunedVersion]; ok { + merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, prunedVersion, managed) + if err != nil { + return nil, err + } + delete(managedAtVersion, prunedVersion) + } + for version, managed := range managedAtVersion { + merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, version, managed) + if err != nil { + return nil, err + } + } + return pruned, nil +} + +// addBackOwnedItemsForVersion adds back any fields, list and map items that were removed by prune with specific managed field path at a version. +// It is an extracted sub-function from addBackOwnedItems for code reuse. 
+func (s *Updater) addBackOwnedItemsForVersion(merged, pruned *typed.TypedValue, version fieldpath.APIVersion, managed *fieldpath.Set) (*typed.TypedValue, *typed.TypedValue, error) { + var err error + merged, err = s.Converter.Convert(merged, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, pruned, nil + } + return nil, nil, fmt.Errorf("failed to convert merged object at version %v: %v", version, err) + } + pruned, err = s.Converter.Convert(pruned, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, pruned, nil + } + return nil, nil, fmt.Errorf("failed to convert pruned object at version %v: %v", version, err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, nil, fmt.Errorf("failed to create field set from merged object at version %v: %v", version, err) + } + prunedSet, err := pruned.ToFieldSet() + if err != nil { + return nil, nil, fmt.Errorf("failed to create field set from pruned object at version %v: %v", version, err) + } + sc, tr := merged.Schema(), merged.TypeRef() + pruned = merged.RemoveItems(mergedSet.EnsureNamedFieldsAreMembers(sc, tr).Difference(prunedSet.EnsureNamedFieldsAreMembers(sc, tr).Union(managed.EnsureNamedFieldsAreMembers(sc, tr)))) + return merged, pruned, nil +} + +// addBackDanglingItems makes sure that the fields list and map items removed by prune were +// previously owned by the currently applying manager. This will add back fields list and map items +// that are unowned or that are owned by Updaters and shouldn't be removed. +func (s *Updater) addBackDanglingItems(merged, pruned *typed.TypedValue, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + convertedPruned, err := s.Converter.Convert(pruned, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert pruned object to last applied version: %v", err) + } + prunedSet, err := convertedPruned.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from pruned object in last applied version: %v", err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from merged object in last applied version: %v", err) + } + sc, tr := merged.Schema(), merged.TypeRef() + prunedSet = prunedSet.EnsureNamedFieldsAreMembers(sc, tr) + mergedSet = mergedSet.EnsureNamedFieldsAreMembers(sc, tr) + last := lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr) + return merged.RemoveItems(mergedSet.Difference(prunedSet).Intersection(last)), nil +} + +// reconcileManagedFieldsWithSchemaChanges reconciles the managed fields with any changes to the +// object's schema since the managed fields were written. 
+// +// Supports: +// - changing types from atomic to granular +// - changing types from granular to atomic +func (s *Updater) reconcileManagedFieldsWithSchemaChanges(liveObject *typed.TypedValue, managers fieldpath.ManagedFields) (fieldpath.ManagedFields, error) { + result := fieldpath.ManagedFields{} + for manager, versionedSet := range managers { + tv, err := s.Converter.Convert(liveObject, versionedSet.APIVersion()) + if s.Converter.IsMissingVersionError(err) { // okay to skip, obsolete versions will be deleted automatically anyway + continue + } + if err != nil { + return nil, err + } + reconciled, err := typed.ReconcileFieldSetWithSchema(versionedSet.Set(), tv) + if err != nil { + return nil, err + } + if reconciled != nil { + result[manager] = fieldpath.NewVersionedSet(reconciled, versionedSet.APIVersion(), versionedSet.Applied()) + } else { + result[manager] = versionedSet + } + } + return result, nil +} From 360c6c11adadcfef3623f57b6ff1a5a6f7b8c6ee Mon Sep 17 00:00:00 2001 From: Sascha Schwarze Date: Mon, 13 May 2024 20:27:34 +0200 Subject: [PATCH 2/2] Adopt code --- pkg/inventory/inventory.go | 17 +++++++++++------ test/integration/inventory_controller_test.go | 2 +- test/stubs/shipwright.go | 6 +++--- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/pkg/inventory/inventory.go b/pkg/inventory/inventory.go index 8c3d513e..864df1c6 100644 --- a/pkg/inventory/inventory.go +++ b/pkg/inventory/inventory.go @@ -30,7 +30,7 @@ var _ Interface = &Inventory{} // TriggerRules keeps the source and webhook trigger information for each Build instance. type TriggerRules struct { - source buildapi.Source + source *buildapi.Source trigger buildapi.Trigger } @@ -42,9 +42,11 @@ func (i *Inventory) Add(b *buildapi.Build) { i.m.Lock() defer i.m.Unlock() - if b.Spec.Trigger == nil { - b.Spec.Trigger = &buildapi.Trigger{} + trigger := b.Spec.Trigger + if trigger == nil { + trigger = &buildapi.Trigger{} } + buildName := types.NamespacedName{Namespace: b.GetNamespace(), Name: b.GetName()} i.logger.V(0).Info( "Storing Build on the inventory", @@ -163,13 +165,16 @@ func (i *Inventory) SearchForGit( return i.loopByWhenType(triggerType, func(tr TriggerRules) bool { // first thing to compare, is the repository URL, it must match in order to define the actual // builds that are representing the repository - if tr.source.GitSource == nil { + if tr.source == nil { + return false + } + if tr.source.Type != buildapi.GitType { return false } - if tr.source.GitSource.URL == nil { + if tr.source.Git == nil { return false } - if !CompareURLs(repoURL, *tr.source.GitSource.URL) { + if !CompareURLs(repoURL, tr.source.Git.URL) { return false } diff --git a/test/integration/inventory_controller_test.go b/test/integration/inventory_controller_test.go index bc7559f9..be33b23a 100644 --- a/test/integration/inventory_controller_test.go +++ b/test/integration/inventory_controller_test.go @@ -32,7 +32,7 @@ var _ = Describe("Build Inventory Controller", Ordered, func() { searchForBuildWithGitHubTriggerFn := func() int { return len(buildInventory.SearchForGit( buildapi.GitHubWebHookTrigger, - *buildWithGitHubTrigger.Spec.Source.GitSource.URL, + buildWithGitHubTrigger.Spec.Source.Git.URL, stubs.Branch, )) } diff --git a/test/stubs/shipwright.go b/test/stubs/shipwright.go index cd7e7707..55bf1e43 100644 --- a/test/stubs/shipwright.go +++ b/test/stubs/shipwright.go @@ -55,10 +55,10 @@ func ShipwrightBuild(outputImageBase, name string) *buildapi.Build { Kind: &strategyKind, Name: "buildpacks-v3", }, - Source: 
buildapi.Source{ + Source: &buildapi.Source{ Type: buildapi.GitType, - GitSource: &buildapi.Git{ - URL: &RepoURL, + Git: &buildapi.Git{ + URL: RepoURL, }, ContextDir: &contextDir, },
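Editorial note, outside the patch itself: the hunks in the "Adopt code" commit above migrate the stubs and the inventory to the shipwright-io/build v1beta1 source shape, where `Spec.Source` becomes a pointer, `GitSource` is renamed to `Git`, and `URL` is a plain string rather than a `*string`. The following is a minimal sketch of that shape under those assumptions only; the `gitURLOf` helper is hypothetical and simply mirrors the nil-guards added to `Inventory.SearchForGit` in the diff, it is not part of the library or of this PR.

package main

import (
	"fmt"

	buildapi "github.com/shipwright-io/build/pkg/apis/build/v1beta1"
)

// gitURLOf mirrors the nil-guards the patch adds to SearchForGit: in v1beta1 every
// level of the source can legitimately be absent, so each one is checked before use.
func gitURLOf(b *buildapi.Build) (string, bool) {
	src := b.Spec.Source
	if src == nil || src.Type != buildapi.GitType || src.Git == nil {
		return "", false
	}
	// URL is a plain string in v1beta1, so no pointer dereference is needed.
	return src.Git.URL, true
}

func main() {
	contextDir := "docker-build"
	b := &buildapi.Build{
		Spec: buildapi.BuildSpec{
			// Source is a pointer in v1beta1 (it was a value in v1alpha1).
			Source: &buildapi.Source{
				Type:       buildapi.GitType,
				Git:        &buildapi.Git{URL: "https://github.com/shipwright-io/sample-go"},
				ContextDir: &contextDir,
			},
		},
	}
	if url, ok := gitURLOf(b); ok {
		fmt.Println("git source:", url)
	}
}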