From c4b58da2bec6a73bcbc79bd6cdfb5f2d3c8647d9 Mon Sep 17 00:00:00 2001
From: CodePorterL <91072507+CodePorterL@users.noreply.github.com>
Date: Fri, 15 Dec 2023 18:58:59 +0800
Subject: [PATCH] feat: v0.5.0.dev231215 (#198)

* repo-sync-2023-12-15T17:04:07+0800
* repo-sync-2023-12-15T17:47:55+0800
---
 CHANGELOG.md | 6 +
 cmd/kuscia/autonomy/autonomy.go | 7 +-
 cmd/kuscia/confloader/config.go | 13 +-
 cmd/kuscia/confloader/secretbackendloader.go | 1 -
 cmd/kuscia/lite/lite.go | 8 +-
 cmd/kuscia/master/master.go | 6 +-
 cmd/kuscia/modules/agent.go | 4 +-
 cmd/kuscia/modules/allinone_operator.go | 1 +
 cmd/kuscia/modules/confmanager.go | 14 +-
 cmd/kuscia/modules/containerd.go | 10 +-
 cmd/kuscia/modules/coredns.go | 5 +-
 cmd/kuscia/modules/domainroute.go | 3 +-
 cmd/kuscia/modules/envoy.go | 12 +-
 cmd/kuscia/modules/k3s.go | 17 +-
 cmd/kuscia/modules/kusciaapi.go | 8 +-
 cmd/kuscia/modules/modules.go | 54 +-
 cmd/kuscia/modules/modules_test.go | 4 +-
 cmd/kuscia/modules/scheduler.go | 6 +-
 cmd/kuscia/modules/transport.go | 4 +-
 .../kuscia.secretflow_domainroutes.yaml | 8 +-
 crds/v1alpha1/kuscia.secretflow_domains.yaml | 7 +
 ...scia.secretflow_kusciabetadeployments.yaml | 2273 ++++++++++++++++
 .../kuscia.secretflow_kusciabetajobs.yaml | 274 ++
 .../kuscia.secretflow_kusciabetatasks.yaml | 2301 +++++++++++++++++
 ....secretflow_kusciadeploymentsummaries.yaml | 181 ++
 .../kuscia.secretflow_kusciajobs.yaml | 32 +
 .../kuscia.secretflow_kusciajobsummaries.yaml | 172 ++
 .../kuscia.secretflow_kusciatasks.yaml | 13 +
 ...kuscia.secretflow_kusciatasksummaries.yaml | 297 +++
 .../K8s_master_lite_cn.md | 171 ++
 .../K8s_deployment_kuscia/K8s_p2p_cn.md | 124 +
 .../K8s_deployment_kuscia/index.rst | 8 +
 docs/deployment/deploy_master_lite_cn.md | 21 +-
 docs/deployment/deploy_p2p_cn.md | 24 +-
 docs/deployment/index.rst | 1 +
 docs/deployment/kuscia_config_cn.md | 10 +-
 docs/deployment/logdescription.md | 22 +-
 docs/development/register_custom_image.md | 2 +-
 docs/index.md | 3 +-
 docs/reference/apis/kusciajob_cn.md | 21 +-
 docs/reference/apis/serving_cn.md | 42 +-
 docs/reference/apis/summary_cn.md | 20 +-
 docs/reference/concepts/domaindata_cn.md | 2 +-
 docs/reference/concepts/domainroute_cn.md | 8 +-
 docs/reference/concepts/kusciatask_cn.md | 75 +
 docs/reference/troubleshoot/index.rst | 3 +-
 .../troubleshoot/userdefinedserviceroute.md | 55 +
 docs/tutorial/run_secretflow_with_api_cn.md | 106 +-
 .../listeners/external_listeners.json.tmpl | 8 +-
 .../listeners/internal_listeners.json.tmpl | 8 +-
 etc/conf/kuscia.yaml | 100 +
 hack/k8s/autonomy/configmap.yaml | 56 +
 hack/k8s/autonomy/deployment.yaml | 46 +
 hack/k8s/autonomy/rbac.yaml | 34 +
 hack/k8s/autonomy/service.yaml | 22 +
 hack/k8s/lite/configmap.yaml | 61 +
 hack/k8s/lite/deployment.yaml | 44 +
 hack/k8s/lite/rbac.yaml | 34 +
 hack/k8s/lite/service.yaml | 22 +
 hack/k8s/master/configmap.yaml | 26 +
 hack/k8s/master/deployment.yaml | 46 +
 hack/k8s/master/service.yaml | 18 +
 pkg/common/constants.go | 38 +-
 pkg/confmanager/commands/root.go | 1 -
 .../clusterdomainroute/controller.go | 8 +-
 .../clusterdomainroute/controller_test.go | 2 +-
 .../domain/authorization_resource.go | 3 +-
 pkg/controllers/domain/controller.go | 16 +-
 pkg/controllers/domain/domain.go | 2 +-
 pkg/controllers/domainroute/check.go | 2 +-
 pkg/controllers/kusciadeployment/reconcile.go | 2 +
 .../kusciatask/handler/pending_handler.go | 8 +-
 pkg/coredns/setup.go | 2 +-
 pkg/crd/apis/kuscia/v1alpha1/domain_types.go | 7 +
 .../apis/kuscia/v1alpha1/domainroute_types.go | 4 +-
 .../kuscia/v1alpha1/kusciadeployment_types.go | 64 +
 .../apis/kuscia/v1alpha1/kusciajob_types.go | 149 ++
 .../apis/kuscia/v1alpha1/kusciatask_types.go | 82 +
 pkg/crd/apis/kuscia/v1alpha1/register.go | 8 +
 .../kuscia/v1alpha1/zz_generated.deepcopy.go | 511 ++++
 .../v1alpha1/fake/fake_kuscia_client.go | 24 +
 .../fake/fake_kusciabetadeployment.go | 140 +
 .../v1alpha1/fake/fake_kusciabetajob.go | 140 +
 .../v1alpha1/fake/fake_kusciabetatask.go | 140 +
 .../fake/fake_kusciadeploymentsummary.go | 140 +
 .../v1alpha1/fake/fake_kusciajobsummary.go | 140 +
 .../v1alpha1/fake/fake_kusciatasksummary.go | 140 +
 .../kuscia/v1alpha1/generated_expansion.go | 12 +
 .../typed/kuscia/v1alpha1/kuscia_client.go | 30 +
 .../kuscia/v1alpha1/kusciabetadeployment.go | 193 ++
 .../typed/kuscia/v1alpha1/kusciabetajob.go | 193 ++
 .../typed/kuscia/v1alpha1/kusciabetatask.go | 193 ++
 .../v1alpha1/kusciadeploymentsummary.go | 193 ++
 .../typed/kuscia/v1alpha1/kusciajobsummary.go | 193 ++
 .../kuscia/v1alpha1/kusciatasksummary.go | 193 ++
 pkg/crd/informers/externalversions/generic.go | 12 +
 .../kuscia/v1alpha1/interface.go | 42 +
 .../kuscia/v1alpha1/kusciabetadeployment.go | 88 +
 .../kuscia/v1alpha1/kusciabetajob.go | 88 +
 .../kuscia/v1alpha1/kusciabetatask.go | 88 +
 .../v1alpha1/kusciadeploymentsummary.go | 88 +
 .../kuscia/v1alpha1/kusciajobsummary.go | 88 +
 .../kuscia/v1alpha1/kusciatasksummary.go | 88 +
 .../kuscia/v1alpha1/expansion_generated.go | 48 +
 .../kuscia/v1alpha1/kusciabetadeployment.go | 97 +
 .../listers/kuscia/v1alpha1/kusciabetajob.go | 97 +
 .../listers/kuscia/v1alpha1/kusciabetatask.go | 97 +
 .../v1alpha1/kusciadeploymentsummary.go | 97 +
 .../kuscia/v1alpha1/kusciajobsummary.go | 97 +
 .../kuscia/v1alpha1/kusciatasksummary.go | 97 +
 pkg/gateway/commands/root.go | 16 +
 pkg/gateway/controller/domain_route.go | 9 +-
 pkg/gateway/controller/handshake.go | 47 +-
 pkg/kusciaapi/config/kusciaapi_config.go | 9 +-
 pkg/kusciaapi/constants/constants.go | 3 -
 pkg/kusciaapi/service/job_service.go | 41 +-
 pkg/kusciaapi/service/serving_service.go | 24 +-
 pkg/utils/queue/queue.go | 2 +-
 pkg/utils/tls/crypt.go | 8 +-
 pkg/web/framework/config/tls_server_config.go | 4 +-
 proto/api/v1alpha1/kusciaapi/job.pb.go | 440 ++--
 proto/api/v1alpha1/kusciaapi/job.proto | 10 +
 proto/api/v1alpha1/kusciaapi/serving.pb.go | 171 +-
 proto/api/v1alpha1/kusciaapi/serving.proto | 12 +-
 scripts/deploy/add_domain.sh | 2 +-
 scripts/deploy/deploy.sh | 5 +-
 scripts/deploy/generate_rsa_key.sh | 6 +
 scripts/deploy/init_kusciaapi_client_certs.sh | 2 +-
 scripts/deploy/start_secretpad.sh | 2 +-
 scripts/deploy/start_standalone.sh | 21 +-
 scripts/templates/app_image.ezpsi.yaml | 39 -
 scripts/test/suite/core/functions.sh | 24 +-
 .../tools/register_app_image/ezpsi-image.yaml | 39 -
 .../register_app_image/register_app_image.sh | 5 +-
 134 files changed, 11493 insertions(+), 617 deletions(-)
 create mode 100644 crds/v1alpha1/kuscia.secretflow_kusciabetadeployments.yaml
 create mode 100644 crds/v1alpha1/kuscia.secretflow_kusciabetajobs.yaml
 create mode 100644 crds/v1alpha1/kuscia.secretflow_kusciabetatasks.yaml
 create mode 100644 crds/v1alpha1/kuscia.secretflow_kusciadeploymentsummaries.yaml
 create mode 100644 crds/v1alpha1/kuscia.secretflow_kusciajobsummaries.yaml
 create mode 100644 crds/v1alpha1/kuscia.secretflow_kusciatasksummaries.yaml
 create mode 100644 docs/deployment/K8s_deployment_kuscia/K8s_master_lite_cn.md
 create mode 100644 docs/deployment/K8s_deployment_kuscia/K8s_p2p_cn.md
 create mode 100644 docs/deployment/K8s_deployment_kuscia/index.rst
 create mode 100644 docs/reference/troubleshoot/userdefinedserviceroute.md
 create mode 100644 etc/conf/kuscia.yaml
 create mode 100644 hack/k8s/autonomy/configmap.yaml
 create mode 100644 hack/k8s/autonomy/deployment.yaml
 create mode 100644 hack/k8s/autonomy/rbac.yaml
 create mode 100644 hack/k8s/autonomy/service.yaml
 create mode 100644 hack/k8s/lite/configmap.yaml
 create mode 100644 hack/k8s/lite/deployment.yaml
 create mode 100644 hack/k8s/lite/rbac.yaml
 create mode 100644 hack/k8s/lite/service.yaml
 create mode 100644 hack/k8s/master/configmap.yaml
 create mode 100644 hack/k8s/master/deployment.yaml
 create mode 100644 hack/k8s/master/service.yaml
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetadeployment.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetajob.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetatask.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciadeploymentsummary.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciajobsummary.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciatasksummary.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetadeployment.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetajob.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetatask.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciadeploymentsummary.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciajobsummary.go
 create mode 100644 pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciatasksummary.go
 create mode 100644 pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetadeployment.go
 create mode 100644 pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetajob.go
 create mode 100644 pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetatask.go
 create mode 100644 pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciadeploymentsummary.go
 create mode 100644 pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciajobsummary.go
 create mode 100644 pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciatasksummary.go
 create mode 100644 pkg/crd/listers/kuscia/v1alpha1/kusciabetadeployment.go
 create mode 100644 pkg/crd/listers/kuscia/v1alpha1/kusciabetajob.go
 create mode 100644 pkg/crd/listers/kuscia/v1alpha1/kusciabetatask.go
 create mode 100644 pkg/crd/listers/kuscia/v1alpha1/kusciadeploymentsummary.go
 create mode 100644 pkg/crd/listers/kuscia/v1alpha1/kusciajobsummary.go
 create mode 100644 pkg/crd/listers/kuscia/v1alpha1/kusciatasksummary.go
 create mode 100755 scripts/deploy/generate_rsa_key.sh
 delete mode 100644 scripts/templates/app_image.ezpsi.yaml
 delete mode 100644 scripts/tools/register_app_image/ezpsi-image.yaml

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0160df53..cb65b3c5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 `Fixed` for any bug fixes.
 `Security` in case of vulnerabilities.
 
+## [0.5.0.dev231215] - 2023-12-15
+### Added
+- Add document for deploying Kuscia on k8s.
+### Changed
+- Optimize log output.
+
 ## [0.5.0.dev231205] - 2023-12-5
 ### Changed
 - Optimize Kuscia deployment configuration and add configuration documentation.
diff --git a/cmd/kuscia/autonomy/autonomy.go b/cmd/kuscia/autonomy/autonomy.go index 800b5ada..ac43456f 100644 --- a/cmd/kuscia/autonomy/autonomy.go +++ b/cmd/kuscia/autonomy/autonomy.go @@ -51,17 +51,14 @@ func Run(ctx context.Context, configFile string, onlyControllers bool) error { defer cancel() kusciaConf := confloader.ReadConfig(configFile, common.RunModeAutonomy) - nlog.Debugf("Read kuscia config: %+v", kusciaConf) + conf := modules.InitDependencies(ctx, kusciaConf) + defer conf.Close() - // dns must start before dependencies because that dependencies init process may access network. var coreDnsModule modules.Module if !onlyControllers { coreDnsModule = modules.RunCoreDNS(runCtx, cancel, &kusciaConf) } - conf := modules.InitDependencies(ctx, kusciaConf, onlyControllers) - defer conf.Close() - if onlyControllers { conf.MakeClients() modules.RunOperatorsAllinOne(runCtx, cancel, conf, true) diff --git a/cmd/kuscia/confloader/config.go b/cmd/kuscia/confloader/config.go index d060ff3a..7d8c7b63 100644 --- a/cmd/kuscia/confloader/config.go +++ b/cmd/kuscia/confloader/config.go @@ -30,11 +30,6 @@ import ( var ( defaultRootDir = "/home/kuscia/" defaultEndpointForMaster = "https://127.0.0.1:6443" - CertPrefix = "etc/certs/" - LogPrefix = "var/logs/" - StdoutPrefix = "var/stdout/" - TmpPrefix = "var/tmp/" - ConfPrefix = "etc/conf/" ) type KusciaConfig struct { @@ -129,10 +124,10 @@ func defaultKusciaConfig(rootDir string) KusciaConfig { } return KusciaConfig{ RootDir: rootDir, - CAKeyFile: filepath.Join(rootDir, TmpPrefix, "ca.key"), - CACertFile: filepath.Join(rootDir, TmpPrefix, "ca.crt"), - DomainKeyFile: filepath.Join(rootDir, TmpPrefix, "domain.key"), - DomainCertFile: filepath.Join(rootDir, TmpPrefix, "domain.crt"), + CAKeyFile: filepath.Join(rootDir, common.CertPrefix, "ca.key"), + CACertFile: filepath.Join(rootDir, common.CertPrefix, "ca.crt"), + DomainKeyFile: filepath.Join(rootDir, common.CertPrefix, "domain.key"), + DomainCertFile: filepath.Join(rootDir, common.CertPrefix, "domain.crt"), EnvoyIP: hostIP, KusciaAPI: kaconfig.NewDefaultKusciaAPIConfig(rootDir), } diff --git a/cmd/kuscia/confloader/secretbackendloader.go b/cmd/kuscia/confloader/secretbackendloader.go index da66b8ef..933d3b2c 100644 --- a/cmd/kuscia/confloader/secretbackendloader.go +++ b/cmd/kuscia/confloader/secretbackendloader.go @@ -22,7 +22,6 @@ import ( "github.com/secretflow/kuscia/pkg/secretbackend" // register driver _ "github.com/secretflow/kuscia/pkg/secretbackend/mem" - _ "github.com/secretflow/kuscia/pkg/secretbackend/rfile" ) type SecretBackendParams struct { diff --git a/cmd/kuscia/lite/lite.go b/cmd/kuscia/lite/lite.go index 101d9aca..bb7e01e4 100644 --- a/cmd/kuscia/lite/lite.go +++ b/cmd/kuscia/lite/lite.go @@ -26,7 +26,6 @@ import ( "github.com/secretflow/kuscia/cmd/kuscia/modules" "github.com/secretflow/kuscia/cmd/kuscia/utils" "github.com/secretflow/kuscia/pkg/common" - "github.com/secretflow/kuscia/pkg/utils/nlog" ) func NewLiteCommand(ctx context.Context) *cobra.Command { @@ -48,14 +47,11 @@ func Run(ctx context.Context, configFile string) error { runCtx, cancel := context.WithCancel(ctx) defer cancel() kusciaConf := confloader.ReadConfig(configFile, common.RunModeLite) - nlog.Debugf("Read kuscia config: %+v", kusciaConf) + conf := modules.InitDependencies(ctx, kusciaConf) + defer conf.Close() - // dns must start before dependencies because that dependencies init process may access network. 
coreDnsModule := modules.RunCoreDNS(runCtx, cancel, &kusciaConf) - conf := modules.InitDependencies(ctx, kusciaConf, false) - defer conf.Close() - conf.MakeClients() if conf.EnableContainerd { diff --git a/cmd/kuscia/master/master.go b/cmd/kuscia/master/master.go index 515700bb..33e75633 100644 --- a/cmd/kuscia/master/master.go +++ b/cmd/kuscia/master/master.go @@ -59,16 +59,14 @@ func Run(ctx context.Context, configFile string, onlyControllers bool) error { defer cancel() kusciaConf := confloader.ReadConfig(configFile, common.RunModeMaster) - nlog.Debugf("Read kuscia config: %+v", kusciaConf) + conf := modules.InitDependencies(ctx, kusciaConf) + defer conf.Close() - // dns must start before dependencies because that dependencies init process may access network. var coreDnsModule modules.Module if !onlyControllers { coreDnsModule = modules.RunCoreDNS(runCtx, cancel, &kusciaConf) } - conf := modules.InitDependencies(ctx, kusciaConf, onlyControllers) - if onlyControllers { conf.MakeClients() modules.RunOperatorsAllinOne(runCtx, cancel, conf, false) diff --git a/cmd/kuscia/modules/agent.go b/cmd/kuscia/modules/agent.go index 0a2bc8ad..3ba180fb 100644 --- a/cmd/kuscia/modules/agent.go +++ b/cmd/kuscia/modules/agent.go @@ -22,9 +22,9 @@ import ( "path/filepath" "time" - "github.com/secretflow/kuscia/cmd/kuscia/confloader" "github.com/secretflow/kuscia/pkg/agent/commands" "github.com/secretflow/kuscia/pkg/agent/config" + "github.com/secretflow/kuscia/pkg/common" "github.com/secretflow/kuscia/pkg/utils/kubeconfig" "github.com/secretflow/kuscia/pkg/utils/meta" "github.com/secretflow/kuscia/pkg/utils/nlog" @@ -47,7 +47,7 @@ func NewAgent(i *Dependencies) Module { if err != nil { nlog.Fatalf("Get hostname fail: %v", err) } - conf.StdoutPath = filepath.Join(i.RootDir, confloader.StdoutPrefix) + conf.StdoutPath = filepath.Join(i.RootDir, common.StdoutPrefix) if conf.Node.NodeName == "" { conf.Node.NodeName = hostname } diff --git a/cmd/kuscia/modules/allinone_operator.go b/cmd/kuscia/modules/allinone_operator.go index eb24894b..a0bb09cd 100644 --- a/cmd/kuscia/modules/allinone_operator.go +++ b/cmd/kuscia/modules/allinone_operator.go @@ -34,6 +34,7 @@ func RunOperatorsAllinOne(runctx context.Context, cancel context.CancelFunc, con if startAgent { RunAgent(runctx, cancel, conf) + RunConfManager(runctx, cancel, conf) RunDataMesh(runctx, cancel, conf) RunTransport(runctx, cancel, conf) } diff --git a/cmd/kuscia/modules/confmanager.go b/cmd/kuscia/modules/confmanager.go index f9501a1b..ea7b360f 100644 --- a/cmd/kuscia/modules/confmanager.go +++ b/cmd/kuscia/modules/confmanager.go @@ -21,8 +21,10 @@ import ( "encoding/json" "fmt" "io" + "sync/atomic" "time" + "github.com/secretflow/kuscia/pkg/common" "github.com/secretflow/kuscia/pkg/confmanager/commands" "github.com/secretflow/kuscia/pkg/confmanager/config" "github.com/secretflow/kuscia/pkg/confmanager/service" @@ -77,14 +79,20 @@ func NewConfManager(ctx context.Context, d *Dependencies) (Module, error) { conf.DomainKey = d.DomainKey conf.TLS.RootCA = d.CACert conf.TLS.RootCAKey = d.CAKey - conf.DomainCertValue = &d.DomainCertByMasterValue + switch d.RunMode { + case common.RunModeLite: + conf.DomainCertValue = &d.DomainCertByMasterValue + case common.RunModeAutonomy: + conf.DomainCertValue = &atomic.Value{} + conf.DomainCertValue.Store(d.DomainCert) + } secretBackend := findSecretBackend(d.SecretBackendHolder, conf.Backend) if secretBackend == nil { return nil, fmt.Errorf("failed to find secret backend %s for cm", conf.Backend) } conf.BackendDriver = 
secretBackend - nlog.Infof("Conf manager config is %+v", conf) + nlog.Debugf("Conf manager config is %+v", conf) if err := conf.TLS.GenerateServerKeyCerts(serverCertsCommonName, nil, []string{defaultServerCertsSanDNSName}); err != nil { return nil, err @@ -110,7 +118,9 @@ func (m confManagerModule) Run(ctx context.Context) error { func (m confManagerModule) WaitReady(ctx context.Context) error { timeoutTicker := time.NewTicker(30 * time.Second) + defer timeoutTicker.Stop() checkTicker := time.NewTicker(1 * time.Second) + defer checkTicker.Stop() for { select { case <-checkTicker.C: diff --git a/cmd/kuscia/modules/containerd.go b/cmd/kuscia/modules/containerd.go index 3c170bfc..a33ddb9f 100644 --- a/cmd/kuscia/modules/containerd.go +++ b/cmd/kuscia/modules/containerd.go @@ -22,7 +22,7 @@ import ( "path/filepath" "time" - "github.com/secretflow/kuscia/cmd/kuscia/confloader" + pkgcom "github.com/secretflow/kuscia/pkg/common" "github.com/secretflow/kuscia/pkg/utils/common" "github.com/secretflow/kuscia/pkg/utils/nlog" "github.com/secretflow/kuscia/pkg/utils/nlog/ljwriter" @@ -44,8 +44,8 @@ func NewContainerd(i *Dependencies) Module { } func (s *containerdModule) Run(ctx context.Context) error { - configPath := filepath.Join(s.Root, confloader.ConfPrefix, "containerd.toml") - configPathTmpl := filepath.Join(s.Root, confloader.ConfPrefix, "containerd.toml.tmpl") + configPath := filepath.Join(s.Root, pkgcom.ConfPrefix, "containerd.toml") + configPathTmpl := filepath.Join(s.Root, pkgcom.ConfPrefix, "containerd.toml.tmpl") if err := common.RenderConfig(configPathTmpl, configPath, s); err != nil { return err } @@ -54,7 +54,7 @@ func (s *containerdModule) Run(ctx context.Context) error { crictlFile := "/etc/crictl.yaml" if _, err := os.Stat(crictlFile); err != nil { if os.IsNotExist(err) { - if err = os.Link(filepath.Join(s.Root, confloader.ConfPrefix, "crictl.yaml"), crictlFile); err != nil { + if err = os.Link(filepath.Join(s.Root, pkgcom.ConfPrefix, "crictl.yaml"), crictlFile); err != nil { return err } } else { @@ -72,7 +72,7 @@ func (s *containerdModule) Run(ctx context.Context) error { } sp := supervisor.NewSupervisor("containerd", nil, -1) - s.LogConfig.LogPath = filepath.Join(s.Root, confloader.LogPrefix, "containerd.log") + s.LogConfig.LogPath = filepath.Join(s.Root, pkgcom.LogPrefix, "containerd.log") lj, _ := ljwriter.New(&s.LogConfig) n := nlog.NewNLog(nlog.SetWriter(lj)) return sp.Run(ctx, func(ctx context.Context) supervisor.Cmd { diff --git a/cmd/kuscia/modules/coredns.go b/cmd/kuscia/modules/coredns.go index 1233df11..131d77f0 100644 --- a/cmd/kuscia/modules/coredns.go +++ b/cmd/kuscia/modules/coredns.go @@ -28,6 +28,7 @@ import ( "k8s.io/client-go/kubernetes" "github.com/secretflow/kuscia/cmd/kuscia/confloader" + "github.com/secretflow/kuscia/pkg/common" "github.com/secretflow/kuscia/pkg/coredns" "github.com/secretflow/kuscia/pkg/utils/network" "github.com/secretflow/kuscia/pkg/utils/nlog" @@ -120,7 +121,7 @@ func (s *CorednsModule) Run(ctx context.Context) error { ) dnsserver.Directives = directives - contents, err := os.ReadFile(filepath.Join(s.rootDir, confloader.ConfPrefix, "corefile")) + contents, err := os.ReadFile(filepath.Join(s.rootDir, common.ConfPrefix, "corefile")) if err != nil { return err } @@ -183,7 +184,7 @@ func prepareResolvConf(rootDir string) error { } resolvConf := "/etc/resolv.conf" - backupResolvConf := filepath.Join(rootDir, confloader.TmpPrefix, "resolv.conf") + backupResolvConf := filepath.Join(rootDir, common.TmpPrefix, "resolv.conf") exist := 
paths.CheckFileExist(backupResolvConf) if !exist { if err = paths.CopyFile(resolvConf, backupResolvConf); err != nil { diff --git a/cmd/kuscia/modules/domainroute.go b/cmd/kuscia/modules/domainroute.go index a825efb6..1d231357 100644 --- a/cmd/kuscia/modules/domainroute.go +++ b/cmd/kuscia/modules/domainroute.go @@ -21,7 +21,6 @@ import ( "path/filepath" "time" - "github.com/secretflow/kuscia/cmd/kuscia/confloader" "github.com/secretflow/kuscia/pkg/common" "github.com/secretflow/kuscia/pkg/gateway/commands" "github.com/secretflow/kuscia/pkg/gateway/config" @@ -42,7 +41,7 @@ type domainRouteModule struct { func NewDomainRoute(i *Dependencies) Module { conf := config.DefaultStaticGatewayConfig() conf.RootDir = i.RootDir - conf.ConfBasedir = filepath.Join(i.RootDir, confloader.ConfPrefix, "domainroute") + conf.ConfBasedir = filepath.Join(i.RootDir, common.ConfPrefix, "domainroute") conf.DomainID = i.DomainID conf.DomainKey = i.DomainKey conf.MasterConfig = &i.Master diff --git a/cmd/kuscia/modules/envoy.go b/cmd/kuscia/modules/envoy.go index 5dbd7478..e74cc3f9 100644 --- a/cmd/kuscia/modules/envoy.go +++ b/cmd/kuscia/modules/envoy.go @@ -27,7 +27,7 @@ import ( "gopkg.in/yaml.v3" - "github.com/secretflow/kuscia/cmd/kuscia/confloader" + "github.com/secretflow/kuscia/pkg/common" "github.com/secretflow/kuscia/pkg/gateway/utils" "github.com/secretflow/kuscia/pkg/utils/nlog" "github.com/secretflow/kuscia/pkg/utils/supervisor" @@ -87,7 +87,7 @@ func NewEnvoy(i *Dependencies) Module { } func (s *envoyModule) Run(ctx context.Context) error { - if err := os.MkdirAll(filepath.Join(s.rootDir, confloader.LogPrefix, "envoy/"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(s.rootDir, common.LogPrefix, "envoy/"), 0755); err != nil { return err } deltaArgs, err := s.readCommandArgs() @@ -97,13 +97,13 @@ func (s *envoyModule) Run(ctx context.Context) error { args := []string{ "-c", - filepath.Join(s.rootDir, confloader.ConfPrefix, "envoy/envoy.yaml"), + filepath.Join(s.rootDir, common.ConfPrefix, "envoy/envoy.yaml"), "--service-cluster", s.cluster, "--service-node", s.id, "--log-path", - filepath.Join(s.rootDir, confloader.LogPrefix, "envoy/envoy.log"), + filepath.Join(s.rootDir, common.LogPrefix, "envoy/envoy.log"), } args = append(args, deltaArgs.Args...) 
sp := supervisor.NewSupervisor("envoy", nil, -1) @@ -129,7 +129,7 @@ func (s *envoyModule) logRotate(ctx context.Context) { time.Sleep(d) - cmd := exec.Command("logrotate", filepath.Join(s.rootDir, confloader.ConfPrefix, "logrotate.conf")) + cmd := exec.Command("logrotate", filepath.Join(s.rootDir, common.ConfPrefix, "logrotate.conf")) if err := cmd.Run(); err != nil { nlog.Errorf("Logrotate run error: %v", err) } @@ -158,7 +158,7 @@ func (s *envoyModule) Name() string { } func (s *envoyModule) readCommandArgs() (*EnvoyCommandLineConfig, error) { - configPath := filepath.Join(s.rootDir, confloader.ConfPrefix, s.commandLineConfigFile) + configPath := filepath.Join(s.rootDir, common.ConfPrefix, s.commandLineConfigFile) data, err := os.ReadFile(configPath) if err != nil { return nil, err diff --git a/cmd/kuscia/modules/k3s.go b/cmd/kuscia/modules/k3s.go index 19595c00..191d903a 100644 --- a/cmd/kuscia/modules/k3s.go +++ b/cmd/kuscia/modules/k3s.go @@ -35,7 +35,7 @@ import ( "github.com/google/uuid" - "github.com/secretflow/kuscia/cmd/kuscia/confloader" + pkgcom "github.com/secretflow/kuscia/pkg/common" "github.com/secretflow/kuscia/pkg/utils/common" "github.com/secretflow/kuscia/pkg/utils/network" "github.com/secretflow/kuscia/pkg/utils/nlog/ljwriter" @@ -128,7 +128,6 @@ func NewK3s(i *Dependencies) Module { if clusterToken == "" { clusterToken = fmt.Sprintf("%x", md5.Sum([]byte(i.DomainID))) } - nlog.Infof("ClusterToken is: %s", clusterToken) hostIP, err := network.GetHostIP() if err != nil { nlog.Fatal(err) @@ -176,15 +175,15 @@ func (s *k3sModule) Run(ctx context.Context) error { } if s.enableAudit { args = append(args, - "--kube-apiserver-arg=audit-log-path="+filepath.Join(s.rootDir, confloader.LogPrefix, "k3s-audit.log"), - "--kube-apiserver-arg=audit-policy-file="+filepath.Join(s.rootDir, confloader.ConfPrefix, "k3s/k3s-audit-policy.yaml"), + "--kube-apiserver-arg=audit-log-path="+filepath.Join(s.rootDir, pkgcom.LogPrefix, "k3s-audit.log"), + "--kube-apiserver-arg=audit-policy-file="+filepath.Join(s.rootDir, pkgcom.ConfPrefix, "k3s/k3s-audit-policy.yaml"), "--kube-apiserver-arg=audit-log-maxbackup=10", "--kube-apiserver-arg=audit-log-maxsize=300", ) } sp := supervisor.NewSupervisor("k3s", nil, -1) - s.LogConfig.LogPath = filepath.Join(s.rootDir, confloader.LogPrefix, "k3s.log") + s.LogConfig.LogPath = filepath.Join(s.rootDir, pkgcom.LogPrefix, "k3s.log") lj, _ := ljwriter.New(&s.LogConfig) n := nlog.NewNLog(nlog.SetWriter(lj)) @@ -274,9 +273,9 @@ func genKusciaKubeConfig(conf *Dependencies) error { serverCertFile: filepath.Join(conf.RootDir, k3sDataDirPrefix, "server/tls/server-ca.crt"), clientKeyFile: filepath.Join(conf.RootDir, k3sDataDirPrefix, "server/tls/client-ca.key"), clientCertFile: filepath.Join(conf.RootDir, k3sDataDirPrefix, "server/tls/client-ca.crt"), - clusterRoleFile: filepath.Join(conf.RootDir, confloader.ConfPrefix, "kuscia-clusterrole.yaml"), - clusterRoleBindingFile: filepath.Join(conf.RootDir, confloader.ConfPrefix, "kuscia-clusterrolebinding.yaml"), - kubeConfigTmplFile: filepath.Join(conf.RootDir, confloader.ConfPrefix, "kuscia.kubeconfig.tmpl"), + clusterRoleFile: filepath.Join(conf.RootDir, pkgcom.ConfPrefix, "kuscia-clusterrole.yaml"), + clusterRoleBindingFile: filepath.Join(conf.RootDir, pkgcom.ConfPrefix, "kuscia-clusterrolebinding.yaml"), + kubeConfigTmplFile: filepath.Join(conf.RootDir, pkgcom.ConfPrefix, "kuscia.kubeconfig.tmpl"), kubeConfig: conf.KusciaKubeConfig, } @@ -341,7 +340,7 @@ func genKusciaKubeConfig(conf *Dependencies) error { func 
applyKusciaResources(conf *Dependencies) error { // apply kuscia clusterRole resourceFiles := []string{ - filepath.Join(conf.RootDir, confloader.ConfPrefix, "domain-cluster-res.yaml"), + filepath.Join(conf.RootDir, pkgcom.ConfPrefix, "domain-cluster-res.yaml"), } sw := sync.WaitGroup{} for _, file := range resourceFiles { diff --git a/cmd/kuscia/modules/kusciaapi.go b/cmd/kuscia/modules/kusciaapi.go index 40511d9f..fb688132 100644 --- a/cmd/kuscia/modules/kusciaapi.go +++ b/cmd/kuscia/modules/kusciaapi.go @@ -44,6 +44,10 @@ import ( "github.com/secretflow/kuscia/proto/api/v1alpha1/kusciaapi" ) +const ( + kusciaAPISanDNSName = "kusciaapi" +) + type kusciaAPIModule struct { conf *config.KusciaAPIConfig kusciaClient kusciaclientset.Interface @@ -61,7 +65,7 @@ func NewKusciaAPI(d *Dependencies) (Module, error) { kusciaAPIConfig.DomainKey = d.DomainKey kusciaAPIConfig.TLS.RootCA = d.CACert kusciaAPIConfig.TLS.RootCAKey = d.CAKey - kusciaAPIConfig.TLS.CommonName = d.DomainID + kusciaAPIConfig.TLS.CommonName = "KusciaAPI" kusciaAPIConfig.RunMode = d.RunMode kusciaAPIConfig.DomainCertValue = &d.DomainCertByMasterValue kusciaAPIConfig.TLS.Protocol = d.Protocol @@ -74,7 +78,7 @@ func NewKusciaAPI(d *Dependencies) (Module, error) { } if kusciaAPIConfig.TLS != nil { - if err := kusciaAPIConfig.TLS.LoadFromDataOrFile(); err != nil { + if err := kusciaAPIConfig.TLS.LoadFromDataOrFile(nil, []string{kusciaAPISanDNSName}); err != nil { return nil, err } } diff --git a/cmd/kuscia/modules/modules.go b/cmd/kuscia/modules/modules.go index 50ab6053..c4258535 100644 --- a/cmd/kuscia/modules/modules.go +++ b/cmd/kuscia/modules/modules.go @@ -86,11 +86,11 @@ type Module interface { Name() string } -func (dependencies *Dependencies) LoadCaDomainKeyAndCert() error { +func (d *Dependencies) LoadCaDomainKeyAndCert() error { var err error - config := dependencies.KusciaConfig + config := d.KusciaConfig - if dependencies.CAKey, err = tlsutils.ParseEncodedKey(config.CAKeyData, config.CAKeyFile); err != nil { + if d.CAKey, err = tlsutils.ParseEncodedKey(config.CAKeyData, config.CAKeyFile); err != nil { nlog.Errorf("load key failed: key: %t, file: %s", len(config.CAKeyData) == 0, config.CAKeyFile) return err } @@ -100,12 +100,12 @@ func (dependencies *Dependencies) LoadCaDomainKeyAndCert() error { return err } - if dependencies.CACert, err = tlsutils.ParseCertWithGenerated(dependencies.CAKey, config.DomainID, nil, config.CACertFile); err != nil { - nlog.Errorf("load cert failed: file: %s", dependencies.CACertFile) + if d.CACert, err = tlsutils.ParseCertWithGenerated(d.CAKey, config.DomainID, nil, config.CACertFile); err != nil { + nlog.Errorf("load cert failed: file: %s", d.CACertFile) return err } - if dependencies.DomainKey, err = tlsutils.ParseEncodedKey(config.DomainKeyData, config.DomainKeyFile); err != nil { + if d.DomainKey, err = tlsutils.ParseEncodedKey(config.DomainKeyData, config.DomainKeyFile); err != nil { nlog.Errorf("load key failed: key: %t, file: %s", len(config.CAKeyData) == 0, config.DomainKeyFile) return err } @@ -115,8 +115,8 @@ func (dependencies *Dependencies) LoadCaDomainKeyAndCert() error { return err } - if dependencies.DomainCert, err = tlsutils.ParseCertWithGenerated(dependencies.DomainKey, dependencies.DomainID, nil, config.DomainCertFile); err != nil { - nlog.Errorf("load cert failed: file: %s", dependencies.DomainCertFile) + if d.DomainCert, err = tlsutils.ParseCertWithGenerated(d.DomainKey, d.DomainID, nil, config.DomainCertFile); err != nil { + nlog.Errorf("load cert failed: file: %s", 
d.DomainCertFile) return err } @@ -124,16 +124,16 @@ func (dependencies *Dependencies) LoadCaDomainKeyAndCert() error { } func EnsureDir(conf *Dependencies) error { - if err := os.MkdirAll(filepath.Join(conf.RootDir, confloader.CertPrefix), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(conf.RootDir, common.CertPrefix), 0755); err != nil { return err } - if err := os.MkdirAll(filepath.Join(conf.RootDir, confloader.LogPrefix), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(conf.RootDir, common.LogPrefix), 0755); err != nil { return err } - if err := os.MkdirAll(filepath.Join(conf.RootDir, confloader.StdoutPrefix), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(conf.RootDir, common.StdoutPrefix), 0755); err != nil { return err } - if err := os.MkdirAll(filepath.Join(conf.RootDir, confloader.TmpPrefix), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(conf.RootDir, common.TmpPrefix), 0755); err != nil { return err } return nil @@ -181,11 +181,10 @@ func InitLogs(logConfig *nlog.LogConfig) error { return nil } -func InitDependencies(ctx context.Context, kusciaConf confloader.KusciaConfig, onlyControllerMode bool) *Dependencies { +func InitDependencies(ctx context.Context, kusciaConf confloader.KusciaConfig) *Dependencies { dependencies := &Dependencies{ KusciaConfig: kusciaConf, } - // init log logConfig := &nlog.LogConfig{ LogLevel: kusciaConf.LogLevel, @@ -197,27 +196,24 @@ func InitDependencies(ctx context.Context, kusciaConf confloader.KusciaConfig, o if err := InitLogs(logConfig); err != nil { nlog.Fatal(err) } + nlog.Debugf("Read kuscia config: %+v", kusciaConf) dependencies.LogConfig = logConfig // run config loader - if !onlyControllerMode { - dependencies.SecretBackendHolder = secretbackend.NewHolder() - nlog.Info("Start to init all secret backends ... ") - initDefaultSecretBackend := true - for _, sbc := range dependencies.SecretBackends { - if err := dependencies.SecretBackendHolder.Init(sbc.Name, sbc.Driver, sbc.Params); err != nil { - nlog.Fatalf("Init secret backend name=%s params=%+v failed: %s", sbc.Name, sbc.Params, err) - } - initDefaultSecretBackend = false + dependencies.SecretBackendHolder = secretbackend.NewHolder() + nlog.Info("Start to init all secret backends ... 
") + for _, sbc := range dependencies.SecretBackends { + if err := dependencies.SecretBackendHolder.Init(sbc.Name, sbc.Driver, sbc.Params); err != nil { + nlog.Fatalf("Init secret backend name=%s params=%+v failed: %s", sbc.Name, sbc.Params, err) } - if initDefaultSecretBackend { - nlog.Warnf("Init all secret backend but no provider found, creating default mem type") - if err := dependencies.SecretBackendHolder.Init(common.DefaultSecretBackendName, common.DefaultSecretBackendType, map[string]any{}); err != nil { - nlog.Fatalf("Init default secret backend failed: %s", err) - } + } + if len(dependencies.SecretBackends) == 0 { + nlog.Warnf("Init all secret backend but no provider found, creating default mem type") + if err := dependencies.SecretBackendHolder.Init(common.DefaultSecretBackendName, common.DefaultSecretBackendType, map[string]any{}); err != nil { + nlog.Fatalf("Init default secret backend failed: %s", err) } - nlog.Info("Finish to init all secret backends") } + nlog.Info("Finish Initializing all secret backends") configLoaders, err := confloader.NewConfigLoaderChain(ctx, dependencies.KusciaConfig.ConfLoaders, dependencies.SecretBackendHolder) if err != nil { diff --git a/cmd/kuscia/modules/modules_test.go b/cmd/kuscia/modules/modules_test.go index 5b8f1276..bf00074e 100644 --- a/cmd/kuscia/modules/modules_test.go +++ b/cmd/kuscia/modules/modules_test.go @@ -69,8 +69,8 @@ func Test_LoadKusciaConfig(t *testing.T) { rootDir: /home/kuscia domainID: kuscia caKeyFile: var/tmp/ca.key -caFile: var/tmp//ca.crt -domainKeyFile: var/tmp//domain.key +caFile: var/tmp/ca.crt +domainKeyFile: var/tmp/domain.key master: endpoint: http://127.0.0.1:1080 tls: diff --git a/cmd/kuscia/modules/scheduler.go b/cmd/kuscia/modules/scheduler.go index 365428a2..43b9e254 100644 --- a/cmd/kuscia/modules/scheduler.go +++ b/cmd/kuscia/modules/scheduler.go @@ -28,7 +28,7 @@ import ( "k8s.io/kubernetes/cmd/kube-scheduler/app/options" kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" - "github.com/secretflow/kuscia/cmd/kuscia/confloader" + pkgcom "github.com/secretflow/kuscia/pkg/common" "github.com/secretflow/kuscia/pkg/scheduler/kusciascheduling" "github.com/secretflow/kuscia/pkg/scheduler/queuesort" "github.com/secretflow/kuscia/pkg/utils/common" @@ -81,8 +81,8 @@ func NewScheduler(i *Dependencies) Module { } func (s *schedulerModule) Run(ctx context.Context) error { - configPathTmpl := filepath.Join(s.rootDir, confloader.ConfPrefix, "scheduler-config.yaml.tmpl") - configPath := filepath.Join(s.rootDir, confloader.ConfPrefix, "scheduler-config.yaml") + configPathTmpl := filepath.Join(s.rootDir, pkgcom.ConfPrefix, "scheduler-config.yaml.tmpl") + configPath := filepath.Join(s.rootDir, pkgcom.ConfPrefix, "scheduler-config.yaml") if err := common.RenderConfig(configPathTmpl, configPath, s); err != nil { return err } diff --git a/cmd/kuscia/modules/transport.go b/cmd/kuscia/modules/transport.go index 92166d6a..0bd4db03 100644 --- a/cmd/kuscia/modules/transport.go +++ b/cmd/kuscia/modules/transport.go @@ -23,7 +23,7 @@ import ( "path/filepath" "time" - "github.com/secretflow/kuscia/cmd/kuscia/confloader" + "github.com/secretflow/kuscia/pkg/common" "github.com/secretflow/kuscia/pkg/transport/config" "github.com/secretflow/kuscia/pkg/transport/server/http" "github.com/secretflow/kuscia/pkg/utils/nlog" @@ -58,7 +58,7 @@ func (t *transportModule) runAsGoroutine(ctx context.Context) error { } func (t *transportModule) runAsSubProcess(ctx context.Context) error { - LogDir := filepath.Join(t.rootDir, 
confloader.LogPrefix, fmt.Sprintf("%s/", transportModuleName)) + LogDir := filepath.Join(t.rootDir, common.LogPrefix, fmt.Sprintf("%s/", transportModuleName)) if err := os.MkdirAll(LogDir, 0755); err != nil { return err } diff --git a/crds/v1alpha1/kuscia.secretflow_domainroutes.yaml b/crds/v1alpha1/kuscia.secretflow_domainroutes.yaml index f379468e..7549f778 100644 --- a/crds/v1alpha1/kuscia.secretflow_domainroutes.yaml +++ b/crds/v1alpha1/kuscia.secretflow_domainroutes.yaml @@ -191,9 +191,9 @@ spec: description: DomainRouteStatus represents information about the status of DomainRoute. properties: - IsDestinationUnreachable: + isDestinationAuthorized: type: boolean - isDestinationAuthrized: + isDestinationUnreachable: type: boolean tokenStatus: description: DomainRouteTokenStatus represents information about the @@ -270,8 +270,8 @@ spec: type: array type: object required: - - IsDestinationUnreachable - - isDestinationAuthrized + - isDestinationAuthorized + - isDestinationUnreachable type: object type: object served: true diff --git a/crds/v1alpha1/kuscia.secretflow_domains.yaml b/crds/v1alpha1/kuscia.secretflow_domains.yaml index da17f81b..cf261498 100644 --- a/crds/v1alpha1/kuscia.secretflow_domains.yaml +++ b/crds/v1alpha1/kuscia.secretflow_domains.yaml @@ -63,6 +63,13 @@ spec: items: type: string type: array + master: + description: MasterDomain is used to represent the master domain id + of current domain. For a omit domain, MasterDomain is exactly local + cluster's master For a partner domain, the default MasterDomain + is the domain itself Only for a partner domain which is not an autonomy + domain, you need to specify its master domain explicitly + type: string resourceQuota: description: DomainResourceQuota defines domain resource quota. properties: diff --git a/crds/v1alpha1/kuscia.secretflow_kusciabetadeployments.yaml b/crds/v1alpha1/kuscia.secretflow_kusciabetadeployments.yaml new file mode 100644 index 00000000..005634e4 --- /dev/null +++ b/crds/v1alpha1/kuscia.secretflow_kusciabetadeployments.yaml @@ -0,0 +1,2273 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: kusciabetadeployments.kuscia.secretflow +spec: + group: kuscia.secretflow + names: + kind: KusciaBetaDeployment + listKind: KusciaBetaDeploymentList + plural: kusciabetadeployments + shortNames: + - kbd + singular: kusciabetadeployment + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.totalParties + name: TotalParties + type: integer + - jsonPath: .status.availableParties + name: AvailableParties + type: integer + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: KusciaBetaDeployment is the Schema for the kuscia deployment + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KusciaDeploymentSpec defines the information of kuscia deployment + spec. + properties: + initiator: + type: string + inputConfig: + type: string + parties: + items: + description: KusciaDeploymentParty defines the kuscia deployment + party info. + properties: + appImageRef: + type: string + domainID: + type: string + role: + type: string + template: + description: KusciaDeploymentPartyTemplate defines the template + info for party. + properties: + replicas: + description: Number of desired pods. This is a pointer to + distinguish between explicit zero and not specified. Defaults + to 1. + format: int32 + type: integer + spec: + description: PodSpec defines the spec info of pod. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most + preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit weight + 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. 
+ Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. + If the affinity requirements specified by + this field cease to be met at some point during + pod execution (e.g. due to an update), the + system may or may not try to eventually evict + the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules + (e.g. co-locate this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest + sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the + set of namespaces that the term + applies to. The term is applied + to the union of the namespaces selected + by this field and the ones listed + in the namespaces field. null selector + and null or empty namespaces list + means "this pod's namespace". An + empty selector ({}) matches all + namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. 
+ properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. + If the affinity requirements specified by + this field cease to be met at some point during + pod execution (e.g. due to a pod label update), + the system may or may not try to eventually + evict the pod from its node. When there are + multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label query over a set + of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. 
+ type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies + to. The term is applied to the union + of the namespaces selected by this field + and the ones listed in the namespaces + field. null selector and null or empty + namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same + node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity + expressions specified by this field, but it + may choose a node that violates one or more + of the expressions. The node that is most + preferred is the one with the greatest sum + of weights, i.e. for each node that meets + all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most + preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the + set of namespaces that the term + applies to. The term is applied + to the union of the namespaces selected + by this field and the ones listed + in the namespaces field. null selector + and null or empty namespaces list + means "this pod's namespace". An + empty selector ({}) matches all + namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. 
+ type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met at scheduling + time, the pod will not be scheduled onto the + node. If the anti-affinity requirements specified + by this field cease to be met at some point + during pod execution (e.g. due to a pod label + update), the system may or may not try to + eventually evict the pod from its node. When + there are multiple elements, the lists of + nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label query over a set + of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. 
Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies + to. The term is applied to the union + of the namespaces selected by this field + and the ones listed in the namespaces + field. null selector and null or empty + namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + containers: + items: + description: Container defines the container info. 
+ properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + configVolumeMounts: + items: + description: ConfigVolumeMount defines config + volume mount info. + properties: + mountPath: + type: string + subPath: + type: string + required: + - mountPath + - subPath + type: object + type: array + env: + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined + environment variables in the container + and any service environment variables. + If a variable cannot be resolved, the + reference in the input string will be + unchanged. Double $$ are reduced to a + single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if value + is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the + ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the + pod: supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, + status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of + the container: only resources limits + and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
+ apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the + Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be + a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + imagePullPolicy: + description: PullPolicy describes a policy for + if/when to pull a container image + type: string + livenessProbe: + description: Probe describes a health check to + be performed against a container to determine + whether it is alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed after + having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. 
+ This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness probes + are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully upon + probe failure. The grace period is the duration + in seconds after the processes running in + the pod are sent a termination signal and + the time when the processes are forcibly + halted with a kill signal. Set this value + longer than the expected cleanup time for + your process. If this value is nil, the + pod's terminationGracePeriodSeconds will + be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + description: ContainerPort describes container + port info. + properties: + name: + type: string + port: + format: int32 + type: integer + protocol: + default: HTTP + description: PortProtocol defines the network + protocols. + enum: + - HTTP + - GRPC + type: string + scope: + default: Local + description: PortScope defines the port + usage scope. 
+ enum: + - Cluster + - Domain + - Local + type: string + required: + - name + type: object + type: array + readinessProbe: + description: Probe describes a health check to + be performed against a container to determine + whether it is alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed after + having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness probes + are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value + is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully upon + probe failure. The grace period is the duration + in seconds after the processes running in + the pod are sent a termination signal and + the time when the processes are forcibly + halted with a kill signal. Set this value + longer than the expected cleanup time for + your process. If this value is nil, the + pod's terminationGracePeriodSeconds will + be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: ResourceRequirements describes the + compute resource requirements. + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are + used by this container. \n This is an alpha + field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. + It can only be set for containers." + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available inside + a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum + amount of compute resources allowed. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If + Requests is omitted for a container, it + defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined + value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: SecurityContext only privileged works + now. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges + than its parent process. This bool directly + controls if the no_new_privs flag will be + set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) + run as Privileged 2) has CAP_SYS_ADMIN Note + that this field cannot be set when spec.os.name + is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop + when running containers. Defaults to the + default set of capabilities granted by the + container runtime. Note that this field + cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. + Processes in privileged containers are essentially + equivalent to root on the host. Defaults + to false. Note that this field cannot be + set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of + proc mount to use for the containers. The + default is DefaultProcMount which uses the + container runtime defaults for readonly + paths and masked paths. This requires the + ProcMountType feature flag to be enabled. + Note that this field cannot be set when + spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a + read-only root filesystem. Default is false. + Note that this field cannot be set when + spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint + of the container process. Uses runtime default + if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container + must run as a non-root user. If true, the + Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 + (root) and fail to start the container if + it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint + of the container process. Defaults to user + specified in image metadata if unspecified. + May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied + to the container. If unspecified, the container + runtime will allocate a random SELinux context + for each container. May also be set in + PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name + is windows. + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. + type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by + this container. If seccomp options are provided + at both the pod & container level, the container + options override the pod options. Note that + this field cannot be set when spec.os.name + is windows. + properties: + localhostProfile: + description: localhostProfile indicates + a profile defined in a file on the node + should be used. The profile must be + preconfigured on the node to work. Must + be a descending path, relative to the + kubelet's configured seccomp profile + location. Must only be set if type is + "Localhost". + type: string + type: + description: "type indicates which kind + of seccomp profile will be applied. + Valid options are: \n Localhost - a + profile defined in a file on the node + should be used. RuntimeDefault - the + container runtime default profile should + be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings + applied to all containers. If unspecified, + the options from the PodSecurityContext + will be used. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name + is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where + the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is + the name of the GMSA credential spec + to use. + type: string + hostProcess: + description: HostProcess determines if + a container should be run as a 'Host + Process' container. This field is alpha-level + and will only be honored by components + that enable the WindowsHostProcessContainers + feature flag. Setting this field without + the feature flag will result in errors + when validating the Pod. All of a Pod's + containers must have the same effective + HostProcess value (it is not allowed + to have a mix of HostProcess containers + and non-HostProcess containers). In + addition, if HostProcess is true then + HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to + run the entrypoint of the container + process. Defaults to the user specified + in image metadata if unspecified. May + also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probe describes a health check to + be performed against a container to determine + whether it is alive or ready to receive traffic. 
+ properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed after + having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness probes + are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. 
Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully upon + probe failure. The grace period is the duration + in seconds after the processes running in + the pod are sent a termination signal and + the time when the processes are forcibly + halted with a kill signal. Set this value + longer than the expected cleanup time for + your process. If this value is nil, the + pod's terminationGracePeriodSeconds will + be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + workingDir: + type: string + required: + - name + type: object + type: array + restartPolicy: + description: 'Restart policy for all containers within + the pod. One of Always, OnFailure, Never. Default + to Never. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + type: object + strategy: + description: The deployment strategy to use to replace existing + pods with new ones. + properties: + rollingUpdate: + description: 'Rolling update config params. Present + only if DeploymentStrategyType = RollingUpdate. --- + TODO: Update this to follow our convention for oneOf, + whatever we decide it to be.' + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: 'The maximum number of pods that can + be scheduled above the desired number of pods. + Value can be an absolute number (ex: 5) or a percentage + of desired pods (ex: 10%). This can not be 0 if + MaxUnavailable is 0. Absolute number is calculated + from percentage by rounding up. Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet + can be scaled up immediately when the rolling + update starts, such that the total number of old + and new pods do not exceed 130% of desired pods. + Once old pods have been killed, new ReplicaSet + can be scaled up further, ensuring that total + number of pods running at any time during the + update is at most 130% of desired pods.' + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: 'The maximum number of pods that can + be unavailable during the update. Value can be + an absolute number (ex: 5) or a percentage of + desired pods (ex: 10%). Absolute number is calculated + from percentage by rounding down. This can not + be 0 if MaxSurge is 0. Defaults to 25%. Example: + when this is set to 30%, the old ReplicaSet can + be scaled down to 70% of desired pods immediately + when the rolling update starts. Once new pods + are ready, old ReplicaSet can be scaled down further, + followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all + times during the update is at least 70% of desired + pods.' 
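The containers list that closes above is Kuscia's own container type rather than the core Kubernetes one: there is no image field (the party-level appImageRef appears to supply the image), ports carry a protocol (HTTP or GRPC) and a scope (Cluster, Domain, or Local), and configVolumeMounts reference a config file by mountPath and subPath. As an illustrative sketch only, using just the fields defined in that schema, one entry of the list might look like the following; the container name, command, paths, and port values are placeholders rather than anything taken from this patch.

- name: app                                     # required; placeholder container name
  command:
    - sh
  args:
    - -c
    - ./entrypoint.sh                           # placeholder entrypoint
  configVolumeMounts:
    - mountPath: /etc/kuscia/task-config.conf   # placeholder paths
      subPath: task-config.conf
  ports:
    - name: svc                                 # required; placeholder port name
      port: 8080
      protocol: HTTP                            # enum: HTTP | GRPC (default HTTP)
      scope: Cluster                            # enum: Cluster | Domain | Local (default Local)
  resources:
    requests:
      cpu: "1"
      memory: 1Gi
  workingDir: /work                             # placeholder working directory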
+ x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or + "RollingUpdate". Default is RollingUpdate. + type: string + type: object + type: object + required: + - appImageRef + - domainID + type: object + type: array + required: + - initiator + - inputConfig + - parties + type: object + status: + description: KusciaDeploymentStatus defines the observed state of kuscia + deployment. + properties: + availableParties: + description: Total number of available parties. + type: integer + lastReconcileTime: + description: Represents last time when the deployment was reconciled. + It is not guaranteed to be set in happens-before order across separate + operations. It is represented in RFC3339 form and is in UTC. + format: date-time + type: string + message: + description: A readable message indicating details about why it is + in this condition. + type: string + partyDeploymentStatuses: + additionalProperties: + additionalProperties: + description: KusciaDeploymentPartyStatus defines party status + of kuscia deployment. + properties: + availableReplicas: + description: Total number of available pods (ready for at + least minReadySeconds) targeted by this deployment. + format: int32 + type: integer + conditions: + description: Represents the latest available observations + of a deployment's current state. + items: + description: DeploymentCondition describes the state of + a deployment at a certain point. + properties: + lastTransitionTime: + description: Last time the condition transitioned from + one status to another. + format: date-time + type: string + lastUpdateTime: + description: The last time this condition was updated. + format: date-time + type: string + message: + description: A human readable message indicating details + about the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, + Unknown. + type: string + type: + description: Type of deployment condition. + type: string + required: + - status + - type + type: object + type: array + creationTimestamp: + format: date-time + type: string + phase: + description: The party deployment phase. + type: string + replicas: + description: Total number of non-terminated pods targeted + by this deployment (their labels match the selector). + format: int32 + type: integer + role: + type: string + unavailableReplicas: + description: Total number of unavailable pods targeted by + this deployment. This is the total number of pods that are + still required for the deployment to have 100% available + capacity. They may either be pods that are running but not + yet available or pods that still have not been created. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted + by this deployment that have the desired template spec. + format: int32 + type: integer + required: + - availableReplicas + - replicas + - unavailableReplicas + - updatedReplicas + type: object + type: object + description: PartyDeploymentStatuses defines deployment status for + all party. + type: object + phase: + description: The phase of a KusciaDeployment is a simple, high-level + summary of where the deployment is in its lifecycle. + type: string + reason: + description: A brief CamelCase message indicating details about why + it is in this state. + type: string + totalParties: + description: Total number of parties. 
+ type: integer + required: + - availableParties + - totalParties + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crds/v1alpha1/kuscia.secretflow_kusciabetajobs.yaml b/crds/v1alpha1/kuscia.secretflow_kusciabetajobs.yaml new file mode 100644 index 00000000..d52e503d --- /dev/null +++ b/crds/v1alpha1/kuscia.secretflow_kusciabetajobs.yaml @@ -0,0 +1,274 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: kusciabetajobs.kuscia.secretflow +spec: + group: kuscia.secretflow + names: + kind: KusciaBetaJob + listKind: KusciaBetaJobList + plural: kusciabetajobs + shortNames: + - kbj + singular: kusciabetajob + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.startTime + name: StartTime + type: date + - jsonPath: .status.completionTime + name: CompletionTime + type: date + - jsonPath: .status.lastReconcileTime + name: LastReconcileTime + type: date + - jsonPath: .status.phase + name: Phase + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: KusciaBetaJob is the Schema for the kuscia beta job API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KusciaBetaJobSpec defines the information of kuscia beta + job spec. + properties: + flowID: + description: FlowID defines the id of flow + type: string + initiator: + description: Initiator who schedule this KusciaJob. + type: string + maxParallelism: + default: 1 + description: MaxParallelism max parallelism of tasks, default 1. At + a certain moment, there may be multiple subtasks that can be scheduled. + this field defines the maximum number of tasks in the Running state. + maximum: 128 + minimum: 1 + type: integer + scheduleMode: + default: Strict + description: ScheduleMode defines how this job will be scheduled. + In Strict, if any non-tolerable subtasks failed, Scheduling for + this task stops immediately, and it immediately enters the final + Failed state. In BestEffort, if any non-tolerable subtasks failed, + Scheduling for this job will continue. But the successor subtask + of the failed subtask stops scheduling, and the current state will + be running. When all subtasks succeed or fail, the job will enter + the Failed state. + enum: + - Strict + - BestEffort + type: string + tasks: + description: Tasks defines the subtasks participating in scheduling + and their dependencies, and the subtasks and dependencies should + constitute a directed acyclic graph. During runtime, each subtask + will be created as a KusciaTask. + items: + properties: + alias: + description: Alias represents KusciaTask alias. 
+ type: string + appImage: + description: AppImage defines image be used in KusciaTask + type: string + dependencies: + description: Dependencies defines the dependencies of this subtask. + Only when the dependencies of this subtask are all in the + Succeeded state, this subtask can be scheduled. + items: + type: string + maxItems: 128 + minItems: 1 + type: array + parties: + description: Parties defines participants and role in this KusciaTask + items: + properties: + domainID: + type: string + role: + type: string + required: + - domainID + type: object + type: array + priority: + description: Priority defines priority of ready subtask. When + multiple subtasks are ready, which one is scheduled first. + The larger the value of this field, the higher the priority. + type: integer + scheduleConfig: + description: ScheduleConfig defines the schedule config for + KusciaTask. + properties: + lifecycleSeconds: + type: integer + minReservedMembers: + minimum: 1 + type: integer + resourceReservedSeconds: + type: integer + retryIntervalSeconds: + type: integer + type: object + taskID: + description: TaskID represents KusciaTask id, it should match + rfc1123 DNS_LABEL pattern. It will be used in Dependencies. + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + taskInputConfig: + description: TaskInputConfig defines input config for KusciaTask. + type: string + tolerable: + default: false + description: Tolerable default false. If this sub-task failed, + job will not be failed. tolerable sub-task can not be other + sub-tasks dependencies. + type: boolean + required: + - alias + - appImage + - parties + - taskInputConfig + type: object + maxItems: 128 + minItems: 1 + type: array + required: + - initiator + - tasks + type: object + status: + description: KusciaJobStatus defines the observed state of kuscia job. + properties: + approveStatus: + additionalProperties: + type: string + description: job approve status of each party, if job controller is + configured with "AutoApproved", the party's approved status will + be initiated with "JobAccepted" + type: object + completionTime: + description: Represents time when the job was completed. It is not + guaranteed to be set in happens-before order across separate operations. + It is represented in RFC3339 form and is in UTC. + format: date-time + type: string + conditions: + description: The latest available observations of an object's current + state. + items: + description: KusciaJobCondition describes current state of a kuscia + job. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human-readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of job condition. + type: string + required: + - status + - type + type: object + type: array + lastReconcileTime: + description: Represents last time when the job was reconciled. It + is not guaranteed to be set in happens-before order across separate + operations. It is represented in RFC3339 form and is in UTC. + format: date-time + type: string + message: + description: A human-readable message indicating details about why + the job is in this condition. 
+ type: string + partyTaskCreateStatus: + additionalProperties: + items: + description: PartyTaskCreateStatus defines party task create status. + properties: + domainID: + type: string + message: + type: string + phase: + type: string + role: + type: string + required: + - domainID + type: object + type: array + description: PartyTaskCreateStatus describes the created status of + party task. + type: object + phase: + description: The phase of a KusciaJob is a simple, high-level summary + of where the job is in its lifecycle. + type: string + reason: + description: A brief CamelCase message indicating details about why + the job is in this state. + type: string + stageStatus: + additionalProperties: + type: string + description: job stage status of each party, + type: object + startTime: + description: Represents time when the job was acknowledged by the + job controller. It is not guaranteed to be set in happens-before + order across separate operations. It is represented in RFC3339 form + and is in UTC. + format: date-time + type: string + taskStatus: + additionalProperties: + description: KusciaTaskPhase is a label for the condition of a kuscia + task at the current time. + type: string + description: TaskStatus describes subtasks state. The key is taskId. + Uncreated subtasks will not appear here. + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crds/v1alpha1/kuscia.secretflow_kusciabetatasks.yaml b/crds/v1alpha1/kuscia.secretflow_kusciabetatasks.yaml new file mode 100644 index 00000000..3e2a097b --- /dev/null +++ b/crds/v1alpha1/kuscia.secretflow_kusciabetatasks.yaml @@ -0,0 +1,2301 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: kusciabetatasks.kuscia.secretflow +spec: + group: kuscia.secretflow + names: + kind: KusciaBetaTask + listKind: KusciaBetaTaskList + plural: kusciabetatasks + shortNames: + - kbt + singular: kusciabetatask + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.startTime + name: StartTime + type: date + - jsonPath: .status.completionTime + name: CompletionTime + type: date + - jsonPath: .status.lastReconcileTime + name: LastReconcileTime + type: date + - jsonPath: .status.phase + name: Phase + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: KusciaBetaTask is the Schema for the namespace kuscia task API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KusciaTaskSpec defines the information of kuscia task spec. + properties: + initiator: + type: string + parties: + items: + description: PartyInfo defines the basic party info. 
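Taken together, the kusciabetajobs schema above describes a DAG of tasks scheduled under either the Strict or the BestEffort mode. The manifest below is an illustrative sketch built only from the fields defined in that schema, not an example shipped with this patch; the job name, domain IDs, AppImage name, and taskInputConfig payloads are placeholders.

apiVersion: kuscia.secretflow/v1alpha1
kind: KusciaBetaJob
metadata:
  name: job-example                         # placeholder
spec:
  initiator: alice                          # placeholder initiator domain
  maxParallelism: 2
  scheduleMode: Strict                      # or BestEffort
  tasks:
    - alias: task-a                         # alias, appImage, parties and taskInputConfig are required
      taskID: task-a                        # optional; must match the RFC 1123 DNS-label pattern
      appImage: app-image-example           # placeholder AppImage name
      tolerable: false
      parties:
        - domainID: alice                   # placeholder participants
        - domainID: bob
      taskInputConfig: '{}'                 # placeholder; application-defined payload
    - alias: task-b
      appImage: app-image-example
      dependencies:
        - task-a                            # scheduled only after the task with taskID task-a succeeds
      parties:
        - domainID: alice
        - domainID: bob
      taskInputConfig: '{}'                 # placeholder; application-defined payload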
+ properties: + appImageRef: + type: string + domainID: + type: string + minReservedPods: + type: integer + role: + type: string + template: + description: PartyTemplate defines the specific info for party. + properties: + replicas: + format: int32 + type: integer + spec: + description: PodSpec defines the spec info of pod. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most + preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit weight + 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. + If the affinity requirements specified by + this field cease to be met at some point during + pod execution (e.g. due to an update), the + system may or may not try to eventually evict + the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules + (e.g. co-locate this pod in the same node, zone, + etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest + sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the + set of namespaces that the term + applies to. The term is applied + to the union of the namespaces selected + by this field and the ones listed + in the namespaces field. null selector + and null or empty namespaces list + means "this pod's namespace". An + empty selector ({}) matches all + namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. 
If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. + If the affinity requirements specified by + this field cease to be met at some point during + pod execution (e.g. due to a pod label update), + the system may or may not try to eventually + evict the pod from its node. When there are + multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label query over a set + of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. 
This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies + to. The term is applied to the union + of the namespaces selected by this field + and the ones listed in the namespaces + field. null selector and null or empty + namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same + node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity + expressions specified by this field, but it + may choose a node that violates one or more + of the expressions. 
The node that is most + preferred is the one with the greatest sum + of weights, i.e. for each node that meets + all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most + preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the + set of namespaces that the term + applies to. The term is applied + to the union of the namespaces selected + by this field and the ones listed + in the namespaces field. null selector + and null or empty namespaces list + means "this pod's namespace". An + empty selector ({}) matches all + namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met at scheduling + time, the pod will not be scheduled onto the + node. If the anti-affinity requirements specified + by this field cease to be met at some point + during pod execution (e.g. due to a pod label + update), the system may or may not try to + eventually evict the pod from its node. When + there are multiple elements, the lists of + nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label query over a set + of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies + to. The term is applied to the union + of the namespaces selected by this field + and the ones listed in the namespaces + field. null selector and null or empty + namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + containers: + items: + description: Container defines the container info. + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + configVolumeMounts: + items: + description: ConfigVolumeMount defines config + volume mount info. 
+ properties: + mountPath: + type: string + subPath: + type: string + required: + - mountPath + - subPath + type: object + type: array + env: + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined + environment variables in the container + and any service environment variables. + If a variable cannot be resolved, the + reference in the input string will be + unchanged. Double $$ are reduced to a + single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if value + is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the + ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the + pod: supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, + status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of + the container: only resources limits + and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the + Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be + a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + imagePullPolicy: + description: PullPolicy describes a policy for + if/when to pull a container image + type: string + livenessProbe: + description: Probe describes a health check to + be performed against a container to determine + whether it is alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed after + having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. 
+ This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness probes + are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully upon + probe failure. The grace period is the duration + in seconds after the processes running in + the pod are sent a termination signal and + the time when the processes are forcibly + halted with a kill signal. Set this value + longer than the expected cleanup time for + your process. If this value is nil, the + pod's terminationGracePeriodSeconds will + be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + description: ContainerPort describes container + port info. + properties: + name: + type: string + port: + format: int32 + type: integer + protocol: + default: HTTP + description: PortProtocol defines the network + protocols. + enum: + - HTTP + - GRPC + type: string + scope: + default: Local + description: PortScope defines the port + usage scope. 
+ enum: + - Cluster + - Domain + - Local + type: string + required: + - name + type: object + type: array + readinessProbe: + description: Probe describes a health check to + be performed against a container to determine + whether it is alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed after + having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness probes + are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value + is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully upon + probe failure. The grace period is the duration + in seconds after the processes running in + the pod are sent a termination signal and + the time when the processes are forcibly + halted with a kill signal. Set this value + longer than the expected cleanup time for + your process. If this value is nil, the + pod's terminationGracePeriodSeconds will + be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: ResourceRequirements describes the + compute resource requirements. + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are + used by this container. \n This is an alpha + field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. + It can only be set for containers." + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available inside + a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum + amount of compute resources allowed. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If + Requests is omitted for a container, it + defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined + value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: SecurityContext only privileged works + now. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges + than its parent process. This bool directly + controls if the no_new_privs flag will be + set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) + run as Privileged 2) has CAP_SYS_ADMIN Note + that this field cannot be set when spec.os.name + is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop + when running containers. Defaults to the + default set of capabilities granted by the + container runtime. Note that this field + cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. + Processes in privileged containers are essentially + equivalent to root on the host. Defaults + to false. Note that this field cannot be + set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of + proc mount to use for the containers. The + default is DefaultProcMount which uses the + container runtime defaults for readonly + paths and masked paths. This requires the + ProcMountType feature flag to be enabled. + Note that this field cannot be set when + spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a + read-only root filesystem. Default is false. + Note that this field cannot be set when + spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint + of the container process. Uses runtime default + if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container + must run as a non-root user. If true, the + Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 + (root) and fail to start the container if + it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint + of the container process. Defaults to user + specified in image metadata if unspecified. + May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied + to the container. If unspecified, the container + runtime will allocate a random SELinux context + for each container. May also be set in + PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name + is windows. + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. + type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by + this container. If seccomp options are provided + at both the pod & container level, the container + options override the pod options. Note that + this field cannot be set when spec.os.name + is windows. + properties: + localhostProfile: + description: localhostProfile indicates + a profile defined in a file on the node + should be used. The profile must be + preconfigured on the node to work. Must + be a descending path, relative to the + kubelet's configured seccomp profile + location. Must only be set if type is + "Localhost". + type: string + type: + description: "type indicates which kind + of seccomp profile will be applied. + Valid options are: \n Localhost - a + profile defined in a file on the node + should be used. RuntimeDefault - the + container runtime default profile should + be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings + applied to all containers. If unspecified, + the options from the PodSecurityContext + will be used. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name + is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where + the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is + the name of the GMSA credential spec + to use. + type: string + hostProcess: + description: HostProcess determines if + a container should be run as a 'Host + Process' container. This field is alpha-level + and will only be honored by components + that enable the WindowsHostProcessContainers + feature flag. Setting this field without + the feature flag will result in errors + when validating the Pod. All of a Pod's + containers must have the same effective + HostProcess value (it is not allowed + to have a mix of HostProcess containers + and non-HostProcess containers). In + addition, if HostProcess is true then + HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to + run the entrypoint of the container + process. Defaults to the user specified + in image metadata if unspecified. May + also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probe describes a health check to + be performed against a container to determine + whether it is alive or ready to receive traffic. 
+ properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed after + having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness probes + are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. 
Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully upon + probe failure. The grace period is the duration + in seconds after the processes running in + the pod are sent a termination signal and + the time when the processes are forcibly + halted with a kill signal. Set this value + longer than the expected cleanup time for + your process. If this value is nil, the + pod's terminationGracePeriodSeconds will + be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + workingDir: + type: string + required: + - name + type: object + type: array + restartPolicy: + description: 'Restart policy for all containers within + the pod. One of Always, OnFailure, Never. Default + to Never. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + type: object + type: object + required: + - appImageRef + - domainID + type: object + type: array + scheduleConfig: + description: ScheduleConfig defines the config for scheduling. + properties: + lifecycleSeconds: + type: integer + minReservedMembers: + minimum: 1 + type: integer + resourceReservedSeconds: + type: integer + retryIntervalSeconds: + type: integer + type: object + taskInputConfig: + type: string + required: + - initiator + - parties + - taskInputConfig + type: object + status: + description: KusciaTaskStatus defines the observed state of kuscia task. + properties: + completionTime: + description: Represents time when the task was completed. It is not + guaranteed to be set in happens-before order across separate operations. + It is represented in RFC3339 form and is in UTC. + format: date-time + type: string + conditions: + description: The latest available observations of an object's current + state. + items: + description: KusciaTaskCondition describes current state of a kuscia + task. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human-readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of task condition. + type: string + required: + - status + - type + type: object + type: array + lastReconcileTime: + description: Represents last time when the task was reconciled. It + is not guaranteed to be set in happens-before order across separate + operations. It is represented in RFC3339 form and is in UTC. 
+ format: date-time + type: string + message: + description: A human-readable message indicating details about why + the task is in this condition. + type: string + partyTaskStatus: + description: PartyTaskStatus defines task status for all party. + items: + description: PartyTaskStatus defines party task status. + properties: + domainID: + type: string + message: + type: string + phase: + description: KusciaTaskPhase is a label for the condition of + a kuscia task at the current time. + type: string + role: + type: string + required: + - domainID + type: object + type: array + phase: + description: The phase of a KusciaTask is a simple, high-level summary + of where the task is in its lifecycle. + type: string + podStatuses: + additionalProperties: + description: PodStatus describes pod status. + properties: + createTime: + description: Represents time when the pod was created. It is + represented in RFC3339 form and is in UTC. + format: date-time + type: string + message: + description: A human-readable message indicating details about + why the pod is in this condition. + type: string + namespace: + description: Pod's namespace. + type: string + nodeName: + description: Pod's node name. + type: string + podName: + description: Pod name. + type: string + podPhase: + description: The phase of a Pod is a simple, high-level summary + of where the Pod is in its lifecycle. + type: string + readyTime: + description: Represents time when the pod was ready. It is represented + in RFC3339 form and is in UTC. + format: date-time + type: string + reason: + description: A brief CamelCase message indicating details about + why the pod is in this state. e.g. 'Evicted' + type: string + scheduleTime: + description: Represents time when the pod was scheduled. It + is represented in RFC3339 form and is in UTC. + format: date-time + type: string + startTime: + description: Represents time when the pod was accepted by the + agent. It is represented in RFC3339 form and is in UTC. + format: date-time + type: string + terminationLog: + description: The latest stdout/stderr message if app exit fail. + type: string + required: + - namespace + - podName + - podPhase + type: object + description: PodStatuses is map of ns/name and PodStatus, specifies + the status of each pod. + type: object + reason: + description: A brief CamelCase message indicating details about why + the task is in this state. + type: string + serviceStatuses: + additionalProperties: + description: ServiceStatus describes service status. + properties: + createTime: + description: Represents time when the service was created. It + is represented in RFC3339 form and is in UTC. + format: date-time + type: string + message: + description: A human-readable message indicating details about + why the service is in this condition. + type: string + namespace: + description: Service's namespace. + type: string + portName: + description: Service's port name which defined in AppImage container + port. + type: string + portNumber: + description: Service's port number which defined in AppImage + container port. + format: int32 + type: integer + readyTime: + description: Represents time when the service was ready. It + is represented in RFC3339 form and is in UTC. + format: date-time + type: string + reason: + description: A brief CamelCase message indicating details about + why the service is in this state. e.g. 'Evicted' + type: string + scope: + description: Service's port scope which defined in AppImage + container port. 
+ type: string + serviceName: + description: Service name. + type: string + required: + - namespace + - serviceName + type: object + description: ServiceStatuses is map of ns/name and ServiceStatus, + specifies the status of each service. + type: object + startTime: + description: Represents time when the task was acknowledged by the + task controller. It is not guaranteed to be set in happens-before + order across separate operations. It is represented in RFC3339 form + and is in UTC. + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crds/v1alpha1/kuscia.secretflow_kusciadeploymentsummaries.yaml b/crds/v1alpha1/kuscia.secretflow_kusciadeploymentsummaries.yaml new file mode 100644 index 00000000..9ec4beb1 --- /dev/null +++ b/crds/v1alpha1/kuscia.secretflow_kusciadeploymentsummaries.yaml @@ -0,0 +1,181 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: kusciadeploymentsummaries.kuscia.secretflow +spec: + group: kuscia.secretflow + names: + kind: KusciaDeploymentSummary + listKind: KusciaDeploymentSummaryList + plural: kusciadeploymentsummaries + shortNames: + - kds + singular: kusciadeploymentsummary + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.totalParties + name: TotalParties + type: integer + - jsonPath: .status.availableParties + name: AvailableParties + type: integer + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: KusciaDeploymentSummary is used to sync deployment status between + clusters + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KusciaDeploymentSummarySpec defines the information of kuscia + deployment spec. + properties: + KusciaDeploymentID: + type: string + required: + - KusciaDeploymentID + type: object + status: + description: KusciaDeploymentStatus defines the observed state of kuscia + deployment. + properties: + availableParties: + description: Total number of available parties. + type: integer + lastReconcileTime: + description: Represents last time when the deployment was reconciled. + It is not guaranteed to be set in happens-before order across separate + operations. It is represented in RFC3339 form and is in UTC. + format: date-time + type: string + message: + description: A readable message indicating details about why it is + in this condition. + type: string + partyDeploymentStatuses: + additionalProperties: + additionalProperties: + description: KusciaDeploymentPartyStatus defines party status + of kuscia deployment. 
+ properties: + availableReplicas: + description: Total number of available pods (ready for at + least minReadySeconds) targeted by this deployment. + format: int32 + type: integer + conditions: + description: Represents the latest available observations + of a deployment's current state. + items: + description: DeploymentCondition describes the state of + a deployment at a certain point. + properties: + lastTransitionTime: + description: Last time the condition transitioned from + one status to another. + format: date-time + type: string + lastUpdateTime: + description: The last time this condition was updated. + format: date-time + type: string + message: + description: A human readable message indicating details + about the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, + Unknown. + type: string + type: + description: Type of deployment condition. + type: string + required: + - status + - type + type: object + type: array + creationTimestamp: + format: date-time + type: string + phase: + description: The party deployment phase. + type: string + replicas: + description: Total number of non-terminated pods targeted + by this deployment (their labels match the selector). + format: int32 + type: integer + role: + type: string + unavailableReplicas: + description: Total number of unavailable pods targeted by + this deployment. This is the total number of pods that are + still required for the deployment to have 100% available + capacity. They may either be pods that are running but not + yet available or pods that still have not been created. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted + by this deployment that have the desired template spec. + format: int32 + type: integer + required: + - availableReplicas + - replicas + - unavailableReplicas + - updatedReplicas + type: object + type: object + description: PartyDeploymentStatuses defines deployment status for + all party. + type: object + phase: + description: The phase of a KusciaDeployment is a simple, high-level + summary of where the deployment is in its lifecycle. + type: string + reason: + description: A brief CamelCase message indicating details about why + it is in this state. + type: string + totalParties: + description: Total number of parties. + type: integer + required: + - availableParties + - totalParties + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crds/v1alpha1/kuscia.secretflow_kusciajobs.yaml b/crds/v1alpha1/kuscia.secretflow_kusciajobs.yaml index c9aca460..44657966 100644 --- a/crds/v1alpha1/kuscia.secretflow_kusciajobs.yaml +++ b/crds/v1alpha1/kuscia.secretflow_kusciajobs.yaml @@ -165,6 +165,13 @@ spec: status: description: KusciaJobStatus defines the observed state of kuscia job. properties: + approveStatus: + additionalProperties: + type: string + description: job approve status of each party, if job controller is + configured with "AutoApproved", the party's approved status will + be initiated with "JobAccepted" + type: object completionTime: description: Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. @@ -211,6 +218,26 @@ spec: description: A human-readable message indicating details about why the job is in this condition. 
type: string + partyTaskCreateStatus: + additionalProperties: + items: + description: PartyTaskCreateStatus defines party task create status. + properties: + domainID: + type: string + message: + type: string + phase: + type: string + role: + type: string + required: + - domainID + type: object + type: array + description: PartyTaskCreateStatus describes the created status of + party task. + type: object phase: description: The phase of a KusciaJob is a simple, high-level summary of where the job is in its lifecycle. @@ -219,6 +246,11 @@ spec: description: A brief CamelCase message indicating details about why the job is in this state. type: string + stageStatus: + additionalProperties: + type: string + description: job stage status of each party, + type: object startTime: description: Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before diff --git a/crds/v1alpha1/kuscia.secretflow_kusciajobsummaries.yaml b/crds/v1alpha1/kuscia.secretflow_kusciajobsummaries.yaml new file mode 100644 index 00000000..c42026f8 --- /dev/null +++ b/crds/v1alpha1/kuscia.secretflow_kusciajobsummaries.yaml @@ -0,0 +1,172 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: kusciajobsummaries.kuscia.secretflow +spec: + group: kuscia.secretflow + names: + kind: KusciaJobSummary + listKind: KusciaJobSummaryList + plural: kusciajobsummaries + shortNames: + - kjs + singular: kusciajobsummary + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.startTime + name: StartTime + type: date + - jsonPath: .status.completionTime + name: CompletionTime + type: date + - jsonPath: .status.lastReconcileTime + name: LastReconcileTime + type: date + - jsonPath: .status.phase + name: Phase + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: KusciaJobSummary is used to sync job status between clusters + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + stage: + default: Create + description: Stage defines the current situation of a job. + type: string + stageTrigger: + description: StageTrigger refers to the party who trigger current + stage + type: string + type: object + status: + description: KusciaJobStatus defines the observed state of kuscia job. + properties: + approveStatus: + additionalProperties: + type: string + description: job approve status of each party, if job controller is + configured with "AutoApproved", the party's approved status will + be initiated with "JobAccepted" + type: object + completionTime: + description: Represents time when the job was completed. It is not + guaranteed to be set in happens-before order across separate operations. + It is represented in RFC3339 form and is in UTC. 
+ format: date-time + type: string + conditions: + description: The latest available observations of an object's current + state. + items: + description: KusciaJobCondition describes current state of a kuscia + job. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human-readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of job condition. + type: string + required: + - status + - type + type: object + type: array + lastReconcileTime: + description: Represents last time when the job was reconciled. It + is not guaranteed to be set in happens-before order across separate + operations. It is represented in RFC3339 form and is in UTC. + format: date-time + type: string + message: + description: A human-readable message indicating details about why + the job is in this condition. + type: string + partyTaskCreateStatus: + additionalProperties: + items: + description: PartyTaskCreateStatus defines party task create status. + properties: + domainID: + type: string + message: + type: string + phase: + type: string + role: + type: string + required: + - domainID + type: object + type: array + description: PartyTaskCreateStatus describes the created status of + party task. + type: object + phase: + description: The phase of a KusciaJob is a simple, high-level summary + of where the job is in its lifecycle. + type: string + reason: + description: A brief CamelCase message indicating details about why + the job is in this state. + type: string + stageStatus: + additionalProperties: + type: string + description: job stage status of each party, + type: object + startTime: + description: Represents time when the job was acknowledged by the + job controller. It is not guaranteed to be set in happens-before + order across separate operations. It is represented in RFC3339 form + and is in UTC. + format: date-time + type: string + taskStatus: + additionalProperties: + description: KusciaTaskPhase is a label for the condition of a kuscia + task at the current time. + type: string + description: TaskStatus describes subtasks state. The key is taskId. + Uncreated subtasks will not appear here. + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} diff --git a/crds/v1alpha1/kuscia.secretflow_kusciatasks.yaml b/crds/v1alpha1/kuscia.secretflow_kusciatasks.yaml index 0f5f2618..efb3f944 100644 --- a/crds/v1alpha1/kuscia.secretflow_kusciatasks.yaml +++ b/crds/v1alpha1/kuscia.secretflow_kusciatasks.yaml @@ -2251,6 +2251,15 @@ spec: namespace: description: Service's namespace. type: string + portName: + description: Service's port name which defined in AppImage container + port. + type: string + portNumber: + description: Service's port number which defined in AppImage + container port. + format: int32 + type: integer readyTime: description: Represents time when the service was ready. It is represented in RFC3339 form and is in UTC. @@ -2260,6 +2269,10 @@ spec: description: A brief CamelCase message indicating details about why the service is in this state. e.g. 'Evicted' type: string + scope: + description: Service's port scope which defined in AppImage + container port. 
+ type: string serviceName: description: Service name. type: string diff --git a/crds/v1alpha1/kuscia.secretflow_kusciatasksummaries.yaml b/crds/v1alpha1/kuscia.secretflow_kusciatasksummaries.yaml new file mode 100644 index 00000000..a86a3115 --- /dev/null +++ b/crds/v1alpha1/kuscia.secretflow_kusciatasksummaries.yaml @@ -0,0 +1,297 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: kusciatasksummaries.kuscia.secretflow +spec: + group: kuscia.secretflow + names: + kind: KusciaTaskSummary + listKind: KusciaTaskSummaryList + plural: kusciatasksummaries + shortNames: + - kts + singular: kusciatasksummary + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.taskID + name: TaskID + type: string + - jsonPath: .spec.jobID + name: JobID + type: string + - jsonPath: .status.phase + name: Phase + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: KusciaTaskSummary is used to sync task status between clusters + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KusciaTaskSummarySpec defines the information of kuscia task + spec. + properties: + alias: + type: string + jobID: + type: string + required: + - alias + - jobID + type: object + status: + properties: + completionTime: + description: Represents time when the task was completed. It is not + guaranteed to be set in happens-before order across separate operations. + It is represented in RFC3339 form and is in UTC. + format: date-time + type: string + conditions: + description: The latest available observations of an object's current + state. + items: + description: KusciaTaskCondition describes current state of a kuscia + task. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human-readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of task condition. + type: string + required: + - status + - type + type: object + type: array + lastReconcileTime: + description: Represents last time when the task was reconciled. It + is not guaranteed to be set in happens-before order across separate + operations. It is represented in RFC3339 form and is in UTC. + format: date-time + type: string + message: + description: A human-readable message indicating details about why + the task is in this condition. + type: string + partyTaskStatus: + description: PartyTaskStatus defines task status for all party. 
+ items: + description: PartyTaskStatus defines party task status. + properties: + domainID: + type: string + message: + type: string + phase: + description: KusciaTaskPhase is a label for the condition of + a kuscia task at the current time. + type: string + role: + type: string + required: + - domainID + type: object + type: array + phase: + description: The phase of a KusciaTask is a simple, high-level summary + of where the task is in its lifecycle. + type: string + podStatuses: + additionalProperties: + description: PodStatus describes pod status. + properties: + createTime: + description: Represents time when the pod was created. It is + represented in RFC3339 form and is in UTC. + format: date-time + type: string + message: + description: A human-readable message indicating details about + why the pod is in this condition. + type: string + namespace: + description: Pod's namespace. + type: string + nodeName: + description: Pod's node name. + type: string + podName: + description: Pod name. + type: string + podPhase: + description: The phase of a Pod is a simple, high-level summary + of where the Pod is in its lifecycle. + type: string + readyTime: + description: Represents time when the pod was ready. It is represented + in RFC3339 form and is in UTC. + format: date-time + type: string + reason: + description: A brief CamelCase message indicating details about + why the pod is in this state. e.g. 'Evicted' + type: string + scheduleTime: + description: Represents time when the pod was scheduled. It + is represented in RFC3339 form and is in UTC. + format: date-time + type: string + startTime: + description: Represents time when the pod was accepted by the + agent. It is represented in RFC3339 form and is in UTC. + format: date-time + type: string + terminationLog: + description: The latest stdout/stderr message if app exit fail. + type: string + required: + - namespace + - podName + - podPhase + type: object + description: PodStatuses is map of ns/name and PodStatus, specifies + the status of each pod. + type: object + reason: + description: A brief CamelCase message indicating details about why + the task is in this state. + type: string + resourceStatus: + additionalProperties: + description: TaskResourceStatus defines the details of task resource + status. + properties: + completionTime: + format: date-time + type: string + conditions: + items: + description: TaskResourceCondition defines the details of + task resource condition. + properties: + lastTransitionTime: + format: date-time + type: string + reason: + type: string + status: + type: string + type: + description: TaskResourceConditionType is a valid value + for a task resource condition type. + type: string + required: + - status + - type + type: object + type: array + lastTransitionTime: + format: date-time + type: string + phase: + description: TaskResourcePhase is a label for the condition + of a task resource at the current time. + type: string + startTime: + format: date-time + type: string + required: + - phase + type: object + description: resourceStatus refers to each party resource status + type: object + serviceStatuses: + additionalProperties: + description: ServiceStatus describes service status. + properties: + createTime: + description: Represents time when the service was created. It + is represented in RFC3339 form and is in UTC. + format: date-time + type: string + message: + description: A human-readable message indicating details about + why the service is in this condition. 
+ type: string + namespace: + description: Service's namespace. + type: string + portName: + description: Service's port name which defined in AppImage container + port. + type: string + portNumber: + description: Service's port number which defined in AppImage + container port. + format: int32 + type: integer + readyTime: + description: Represents time when the service was ready. It + is represented in RFC3339 form and is in UTC. + format: date-time + type: string + reason: + description: A brief CamelCase message indicating details about + why the service is in this state. e.g. 'Evicted' + type: string + scope: + description: Service's port scope which defined in AppImage + container port. + type: string + serviceName: + description: Service name. + type: string + required: + - namespace + - serviceName + type: object + description: ServiceStatuses is map of ns/name and ServiceStatus, + specifies the status of each service. + type: object + startTime: + description: Represents time when the task was acknowledged by the + task controller. It is not guaranteed to be set in happens-before + order across separate operations. It is represented in RFC3339 form + and is in UTC. + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/docs/deployment/K8s_deployment_kuscia/K8s_master_lite_cn.md b/docs/deployment/K8s_deployment_kuscia/K8s_master_lite_cn.md new file mode 100644 index 00000000..819b2e59 --- /dev/null +++ b/docs/deployment/K8s_deployment_kuscia/K8s_master_lite_cn.md @@ -0,0 +1,171 @@ +# 部署中心化集群 + +## 前言 +本教程帮助你在 k8s 集群上使用 [中心化组网模式](../../reference/architecture_cn.md#中心化组网模式) 来部署 Kuscia 集群。目前 kuscia 在部署到 k8s 上时,隐私计算任务的运行态仅支持 runk 模式,runp 模式正在开发中,详细参考[容器运行模式](../../reference/architecture_cn.md#agent);runc 模式目前需要部署 kuscia 的 pod 有特权容器,暂时不是特别推荐,所以下文默认的模式以 runk 模式来进行部署(需要能够有权限在宿主的 K8s 上拉起任务 pod)。 +> Tips:k8s 部署模式暂不支持训练,仅支持预测服务 + +## 部署 master +部署 master 需提前准备好 mysql 数据库,数据库帐号密码等信息配置在步骤三 Configmap 中(database 需要提前手动创建好并且 mysql 账户需要具有创建表的权限) + +### 步骤一:创建 Namespace +> 创建 namespace 需要先获取 create 权限,避免出现 "namespaces is forbidden" 报错 + +namespace 名称可以按照自己的意愿来定,也可以复用已经有的,下文以 kuscia-master 为例(namespace 名称需要与 yaml 文件里的 namespace 字段对应起来) +```bash +kubectl create ns kuscia-master +``` + +### 步骤二:创建 Service + +获取 [service.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/master/service.yaml) 文件,创建 service +```bash +kubectl create -f service.yaml +``` + +### 步骤三:创建 Configmap +ConfigMap 是用来配置 kuscia 的配置文件,详细的配置文件介绍参考[kuscia配置](../kuscia_config_cn.md) + +domainID、私钥以及 datastoreEndpoint 字段里的数据库连接串(user、password、host、database)需要替换成真实有效的信息,私钥可以通过命令 `docker run -it --rm secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia scripts/deploy/generate_rsa_key.sh` 生成 +> 注意:
+1、database 名称暂不支持 "-" 特殊字符
+2、目前节点私钥仅支持 pkcs#1 格式: "BEGIN RSA PRIVATE KEY/END RSA PRIVATE KEY"
+3、修改 Configmap 配置后,需执行 kubectl delete po \${pod-name} -n \${namespace} 重新拉起 pod 生效 + +注意:节点 id 需要符合 DNS 子域名规则要求,详情请参考[这里](https://kubernetes.io/zh-cn/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names) + +获取 [configmap.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/master/configmap.yaml) 文件,创建 configmap;因为这里面涉及很多敏感配置,请在生产时务必重新配置,不使用默认配置。 + +```bash +kubectl create -f configmap.yaml +``` + +### 步骤四:创建 Deployment + +获取 [deployment.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/master/deployment.yaml) 文件,创建 deployment +```bash +kubectl create -f deployment.yaml +``` + +## 部署 lite + +### 步骤一:创建 Namespace +> 创建 namespace 需要先获取 create 权限,避免出现 "namespaces is forbidden" 报错 + +namespace 名称可以按照自己的意愿来定,也可以复用已经有的,下文以 lite-alice 为例(namespace 名称需要与 yaml 文件里的 namespace 字段对应起来) +```bash +kubectl create ns lite-alice +``` + +### 步骤二:创建 Service + +获取 [service.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/lite/service.yaml) 文件,如果 master 与 lite 不在一个 k8s 集群内,可以将 master service 的端口暴露方式改为 LoadBalancer(公有云,例如:阿里云) 或者 NodePort,并在 configmap 的 masterEndpoint 字段改为可正常访问的地址,创建 service +```bash +kubectl create -f service.yaml +``` + +### 步骤三:创建 Configmap +ConfigMap 是用来配置 kuscia 的配置文件,详细的配置文件介绍参考[kuscia配置](../kuscia_config_cn.md) + +部署 configmap 需要提前在 master 节点 pod 内生成 domainID 以及 token,并填写到 configmap 的 domainID 和 liteDeployToken 字段中,私钥可以通过命令 `docker run -it --rm secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia scripts/deploy/generate_rsa_key.sh` 生成并填写到 domainKeyData 字段中 +> 注意:
+1、目前节点私钥仅支持 pkcs#1 格式: "BEGIN RSA PRIVATE KEY/END RSA PRIVATE KEY"
+2、修改 Configmap 配置后,需执行 kubectl delete po \${pod-name} -n \${namespace} 重新拉起 pod 生效 + + 注意:节点 id 需要符合 DNS 子域名规则要求,详情请参考[这里](https://kubernetes.io/zh-cn/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names) + + lite-bob 配置与 lite-alice 一样,下面以 alice 为例: +```bash +kubectl exec -it ${master_pod_name} bash -n kuscia-master +scripts/deploy/add_domain_lite.sh alice +# 示例 token +BMC4xjNqa7uAmWmyXLuJ4rrZw6brZeax +# 如果token遗忘了,可以通过该命令重新获取 +kubectl get domain alice -o=jsonpath='{.status.deployTokenStatuses[?(@.state=="unused")].token}' && echo +``` + +特殊说明:为了使 ServiceAccount 具有创建、查看、删除等资源权限,runk 模式提供两种方式: +- 方式一:在 configmap 的 kubeconfigFile 字段配置具有同等权限的 kubeconfig +- 方式二:不配置 kubeconfigFile,执行步骤四,创建具有所需权限的 role 和 RoleBinding + + +获取 [configmap.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/lite/configmap.yaml) 文件,创建 configmap;因为这里面涉及很多敏感配置,请在生产时务必重新配置,不使用默认配置。 +```bash +kubectl create -f configmap.yaml +``` + +### 步骤四(可选):创建 rbac.yaml + +获取 [rbac.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/lite/rbac.yaml) 文件,创建 role 和 RoleBinding +```bash +kubectl create -f rbac.yaml +``` + +### 步骤五:创建 Deployment +拉起 lite pod 前可以先检测下与 master 之间的通信是否正常 + +建议使用 curl -kvvv http://kuscia-master.kuscia-master.svc.cluster.local:1080;(此处以 http 为例,https 可以删除 master configmap 里的 protocol: NOTLS 字段,重启 pod 生效。LoadBalancer 或者 NodePort 方式可以用 curl -kvvv http://ip:port)检查一下是否访问能通,正常情况下返回的 http 错误码是401,内容是:unauthorized + +示例如下: +```bash +* Rebuilt URL to: http://kuscia-master.kuscia-master.svc.cluster.local:1080/ +* Trying 192.168.72.65... +* TCP_NODELAY set +* Connected to kuscia-master.kuscia-master.svc.cluster.local (192.168.72.65) port 1080 (#0) +> GET / HTTP/1.1 +> Host: kuscia-master.kuscia-master.svc.cluster.local:1080 +> User-Agent: curl/7.61.1 +> Accept: */* +> +< HTTP/1.1 401 Unauthorized +< x-accel-buffering: no +< content-length: 13 +< content-type: text/plain +< kuscia-error-message: Domain kuscia-system.kuscia-master-7d588b4577-9zxbs<--kuscia-master.kuscia-master.svc.cluster.local return http code 401. +< date: Wed, 29 Nov 2023 07:59:04 GMT +< server: kuscia-gateway +< +* Connection #0 to host kuscia-master.kuscia-master.svc.cluster.local left intact +unauthorized.
+``` + +注意:如果 master 的入口网络存在网关时,为了确保节点与 master 之间通信正常,需要网关符合一些要求,详情请参考[这里](../networkrequirements.md) + +获取 [deployment.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/lite/deployment.yaml) 文件,创建 deployment +```bash +kubectl create -f deployment.yaml +``` + +### 创建 lite-alice、lite-bob 之间的授权 +> PS:目前因为安全性和时间因素,节点之间授权还是需要很多手动的操作,未来会优化。 + +```bash +# 登录 master +kubectl exec -it ${master_pod_name} bash -n kuscia-master +# 创建 alice 到 bob 的授权 +scripts/deploy/create_cluster_domain_route.sh alice bob http://kuscia-lite-bob.lite-bob.svc.cluster.local:1080 +# 创建 bob 到 alice 的授权 +scripts/deploy/create_cluster_domain_route.sh bob alice http://kuscia-lite-alice.lite-alice.svc.cluster.local:1080 +# 执行以下命令,查看是否有内容,如果有说明 alice 到 bob 授权建立成功。 +kubectl get cdr alice-bob -o=jsonpath="{.status.tokenStatus.sourceTokens[*]}" +# 执行以下命令,查看是否有内容,如果有说明 bob 到 alice 授权建立成功 +kubectl get cdr bob-alice -o=jsonpath="{.status.tokenStatus.sourceTokens[*]}" +``` +在 master pod 内执行 `kubectl get cdr` 返回 Ready 为 True 时,表示授权成功,示例如下: +```bash +NAME SOURCE DESTINATION HOST AUTHENTICATION READY +alice-kuscia-system alice kuscia-system Token True +bob-kuscia-system bob kuscia-system Token True +alice-bob alice bob kuscia-lite-bob.lite-bob.svc.cluster.local Token True +bob-alice bob alice kuscia-lite-alice.lite-alice.svc.cluster.local Token True +``` +授权失败,请参考[授权错误排查](../../reference/troubleshoot/networkauthorizationcheck.md)文档 + +## 确认部署成功 +### 检查 pod 状态 +pod 处于 running 状态表示部署成功 +```bash +kubectl get po -n kuscia-master +kubectl get po -n lite-alice +``` +### 检查数据库连接状态 +数据库内生成表格 kine 并且有数据表示数据库连接成功 \ No newline at end of file diff --git a/docs/deployment/K8s_deployment_kuscia/K8s_p2p_cn.md b/docs/deployment/K8s_deployment_kuscia/K8s_p2p_cn.md new file mode 100644 index 00000000..f150415f --- /dev/null +++ b/docs/deployment/K8s_deployment_kuscia/K8s_p2p_cn.md @@ -0,0 +1,124 @@ +# 部署点对点集群 + +## 前言 +本教程帮助你在 k8s 集群上使用 [点对点组网模式](../../reference/architecture_cn.md#点对点组网模式) 来部署 Kuscia 集群。目前 kuscia 在部署到 k8s 上时,隐私计算任务的运行态仅支持 runk 模式,runp 模式正在开发中,详细参考[容器运行模式](../../reference/architecture_cn.md#agent);runc 模式目前需要部署 kuscia 的 pod 有特权容器,暂时不是特别推荐,所以下文默认的模式以 runk 模式来进行部署(需要能够有权限在宿主的 K8s 上拉起任务 pod)。 +> Tips:k8s 部署模式暂不支持训练,仅支持预测服务 + +## 部署 autonomy +部署 autonomy 需提前准备好 mysql 数据库,数据库帐号密码等信息配置在步骤三 Configmap 中(database 需要提前手动创建好并且 mysql 账户需要具有创建表的权限) + +### 步骤一:创建 Namespace +> 创建 namespace 需要先获取 create 权限,避免出现 "namespaces is forbidden" 报错 + +namespace 名称可以按照自己的意愿来定,也可以复用已经有的,下文以 autonomy-alice 为例(namespace 名称需要与 yaml 文件里的 namespace 字段对应起来) +```bash +kubectl create ns autonomy-alice +``` + +### 步骤二:创建 Service + +获取 [service.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/autonomy/service.yaml) 文件,创建这个 service +```bash +kubectl create -f service.yaml +``` + +### 步骤三:创建 Configmap +ConfigMap 是用来配置 kuscia 的配置文件,详细的配置文件介绍参考[kuscia配置](../kuscia_config_cn.md) + +domainID、私钥以及 datastoreEndpoint 字段里的数据库连接串(user、password、host、database)需要替换成真实有效的信息,私钥可以通过命令 `docker run -it --rm secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia scripts/deploy/generate_rsa_key.sh` 生成 +> 注意:
+1、database 名称暂不支持 "-" 特殊字符
+2、目前节点私钥仅支持 pkcs#1 格式: "BEGIN RSA PRIVATE KEY/END RSA PRIVATE KEY"
+3、修改 Configmap 配置后,需执行 kubectl delete po \${pod-name} -n \${namespace} 重新拉起 pod 生效 + +注意:节点 id 需要符合 DNS 子域名规则要求,详情请参考[这里](https://kubernetes.io/zh-cn/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names) + +特殊说明:为了使 ServiceAccount 具有创建、查看、删除等资源权限,runk 模式提供两种方式: +- 方式一:在 configmap 的 kubeconfigFile 字段配置具有同等权限的 kubeconfig +- 方式二:不配置 kubeconfigFile,执行步骤四,创建具有所需权限的 role 和 RoleBinding + +获取 [configmap.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/autonomy/configmap.yaml) 文件,创建这个 configmap;因为这里面涉及很多敏感配置,请在生产时务必重新配置,不使用默认配置。 +```bash +kubectl create -f configmap.yaml +``` + +### 步骤四(可选):创建 rbac.yaml + +获取 [rbac.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/autonomy/rbac.yaml) 文件,创建 role 和 RoleBinding +```bash +kubectl create -f rbac.yaml +``` + +### 步骤五:创建 Deployment + +获取 [deployment-autonomy.yaml](https://github.com/secretflow/kuscia/blob/main/hack/k8s/autonomy/deployment.yaml) 文件,创建这个 deployment +```bash +kubectl create -f deployment.yaml +``` + +### 创建 autonomy-alice、autonomy-bob 之间的授权 +> PS:目前因为安全性和时间因素,节点之间授权还是需要很多手动的操作,未来会优化。 + +alice 和 bob 授权之前可以先检测下相互之间的通信是否正常 + +建议使用 curl -kvvv http://kuscia-autonomy-bob.autonomy-bob.svc.cluster.local:1080;(此处以 http 为例,https 可以删除 configmap 里的 protocol: NOTLS 字段,重启 pod 生效。LoadBalancer 或者 NodePort 方式可以用 curl -kvvv http://ip:port)检查一下是否访问能通,正常情况下返回的 http 错误码是401,内容是:unauthorized + +示例参考[这里](../K8s_deployment_kuscia/K8s_master_lite_cn.md#id6) + +注意:如果 alice/bob 的入口网络存在网关时,为了确保节点之间通信正常,需要网关符合一些要求,详情请参考[这里](../networkrequirements.md) + +建立 alice 到 bob 授权 +```bash +# 将 alice 节点的 domain.crt 证书 cp 到 跳板机当前目录并改名 alice.domain.crt +kubectl cp autonomy-alice/kuscia-autonomy-alice-686d6747c-gc2kk:var/certs/domain.crt alice.domain.crt +# 将 alice.domain.crt 证书 cp 到 bob 节点里 +kubectl cp alice.domain.crt autonomy-bob/kuscia-autonomy-bob-89cf8bc77-cvn9f:var/certs/ +# 登录到 bob 节点 +kubectl exec -it kuscia-autonomy-bob-89cf8bc77-cvn9f bash -n autonomy-bob +# 在 bob 里添加 alice 的证书等信息 +scripts/deploy/add_domain.sh alice p2p +# 登录到 alice 节点 +kubectl exec -it kuscia-autonomy-alice-686d6747c-gc2kk bash -n autonomy-alice +# 建立 alice 到 bob 的通信 +scripts/deploy/join_to_host.sh alice bob http://kuscia-autonomy-bob.autonomy-bob.svc.cluster.local:1080 +``` + +建立 bob 到 alice 授权 +```bash +# 将 bob 节点的 domain.crt 证书 cp 到 跳板机当前目录并改名 bob.domain.crt +kubectl cp autonomy-bob/kuscia-autonomy-bob-89cf8bc77-cvn9f:var/certs/domain.crt bob.domain.crt +# 将 bob.domain.crt 证书 cp 到 alice 节点里 +kubectl cp bob.domain.crt autonomy-alice/kuscia-autonomy-alice-686d6747c-h78lr:var/certs/ +# 登录到 alice 节点 +kubectl exec -it kuscia-autonomy-alice-686d6747c-h78lr bash -n autonomy-alice +# 在 alice 里添加 bob 的证书等信息 +scripts/deploy/add_domain.sh bob p2p +# 登录到 bob 节点 +kubectl exec -it kuscia-autonomy-bob-89cf8bc77-cvn9f bash -n autonomy-bob +# 建立 bob 到 alice 的通信 +scripts/deploy/join_to_host.sh bob alice http://kuscia-autonomy-alice.autonomy-alice.svc.cluster.local:1080 +``` + +检查双方授权状态 + +在 alice 节点内执行 `kubectl get cdr alice-bob -o=jsonpath="{.status.tokenStatus.sourceTokens[*]}"`,在 bob 节点内执行 `kubectl get cdr bob-alice -o=jsonpath="{.status.tokenStatus.sourceTokens[*]}"` 得到下面示例返回结果表示授权成功 +```bash
+{"effectiveInstances":["kuscia-autonomy-alice-686d6747c-h78lr","kuscia-autonomy-alice-686d6747c-qlh2m"],"expirationTime":"2123-11-24T02:42:12Z","isReady":true,"revision":1,"revisionTime":"2023-11-24T02:42:12Z","token":"dVYZ4Ld/i7msNwuLoT+F8kFaCXbgXk6FziaU5PMASl8ReFfOVpsUt0qijlQaKTLm+OKzABfMQEI4jGeJ/Qsmhr6XOjc+7rkSCa5bmCxw5YVq+UtIFwNnjyRDaBV6A+ViiEMZwuaLIiFMtsPLki2SXzcA7LiLZY3oZvHfgf0m8LenMfU9tmZEptRoTBeL3kKagMBhxLxXL4rZzmI1bBwi49zxwOmg3c/MbDP8JiI6zIM7/NdIAEJhqsbzC5/Yw1qajr7D+NLXhsdrtTDSHN8gSB8D908FxYvcxeUTHqDQJT1mWcXs2N4r/Z/3OydkwJiQQokpjfZsR0T4xmbVTJd5qw=="} +``` + +在 alice、bob 节点 pod 内执行 `kubectl get cdr` 返回 Ready 为 True 时,表示授权成功,示例如下: +```bash +NAME SOURCE DESTINATION HOST AUTHENTICATION READY +alice-bob alice bob kuscia-autonomy-bob.autonomy-bob.svc.cluster.local Token True +bob-alice bob alice Token True +``` +授权失败,请参考[授权错误排查](../../reference/troubleshoot/networkauthorizationcheck.md)文档 + +## 确认部署成功 +### 检查 pod 状态 +pod 处于 running 状态表示部署成功 +```bash +kubectl get po -n autonomy-alice +``` +### 检查数据库连接状态 +数据库内生成表格 kine 并且有数据表示数据库连接成功 \ No newline at end of file diff --git a/docs/deployment/K8s_deployment_kuscia/index.rst b/docs/deployment/K8s_deployment_kuscia/index.rst new file mode 100644 index 00000000..c57869b7 --- /dev/null +++ b/docs/deployment/K8s_deployment_kuscia/index.rst @@ -0,0 +1,8 @@ +K8s 集群部署 kuscia +================ + +.. toctree:: + :maxdepth: 2 + + K8s_master_lite_cn + K8s_p2p_cn \ No newline at end of file diff --git a/docs/deployment/deploy_master_lite_cn.md b/docs/deployment/deploy_master_lite_cn.md index d729ab8a..bc3c56ac 100644 --- a/docs/deployment/deploy_master_lite_cn.md +++ b/docs/deployment/deploy_master_lite_cn.md @@ -29,7 +29,9 @@ docker run --rm --pull always $KUSCIA_IMAGE cat /home/kuscia/scripts/deploy/depl # -k 参数传递的是 master 容器 KusciaAPI 映射到主机的 HTTP 端口,保证和主机上现有的端口不冲突即可 ./deploy.sh master -i 1.1.1.1 -p 18080 -k 18082 ``` -注意:如果 master 的入口网络存在网关时,为了确保节点与 master 之间通信正常,需要网关符合一些要求,详情请参考[这里](./networkrequirements.md) +注意:
+1、如果 master 的入口网络存在网关时,为了确保节点与 master 之间通信正常,需要网关符合一些要求,详情请参考[这里](./networkrequirements.md)
+2、master 节点默认使用 sqlite 作为存储,生产部署时需要配置连接到 mysql 数据库的连接串,具体配置可以参考[这里](./kuscia_config_cn.md#id3)
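+下面给出一个确认 datastoreEndpoint 配置的示意(仅为示例草稿:其中的连接串、账号、密码、地址与库名均为占位值,实际字段与格式请以上文链接的 kuscia 配置文档为准):
+```bash
+# 查看宿主机上 master 的 kuscia.yaml 中的 datastoreEndpoint(deploy.sh 部署的默认路径,路径不同请自行替换)
+grep datastoreEndpoint $HOME/kuscia/${USER}-kuscia-master/kuscia.yaml
+# 期望输出形如(占位示例):datastoreEndpoint: "mysql://kuscia_user:kuscia_password@tcp(1.1.1.1:3306)/kuscia"
+# 修改 kuscia.yaml 后重启容器生效
+docker restart ${USER}-kuscia-master
+```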
建议使用 curl -kvvv https://ip:port; 检查一下是否访问能通,正常情况下返回的 http 错误码是401,内容是:unauthorized。 示例如下: @@ -191,15 +193,20 @@ docker exec -it ${USER}-kuscia-master sh scripts/deploy/create_cluster_domain_ro docker exec -it ${USER}-kuscia-master sh scripts/deploy/create_cluster_domain_route.sh bob alice http://2.2.2.2:28080 ``` -执行以下命令,查看是否有内容,如果有说明 alice 到 bob 授权建立成功。 +执行以下命令: ```bash -docker exec -it ${USER}-kuscia-master kubectl get cdr alice-bob -o=jsonpath="{.status.tokenStatus.sourceTokens[*]}" +docker exec -it ${USER}-kuscia-master kubectl get cdr alice-bob -o yaml ``` -执行以下命令,查看是否有内容,如果有说明 bob 到 alice 授权建立成功。 +当 `type` 为 Ready 的 condition 的 `status` 值为 "True" 则说明 alice 到 bob 授权建立成功。 + +执行以下命令: ```bash -docker exec -it ${USER}-kuscia-master kubectl get cdr bob-alice -o=jsonpath="{.status.tokenStatus.sourceTokens[*]}" +docker exec -it ${USER}-kuscia-master kubectl get cdr bob-alice -o yaml ``` + +当 `type` 为 Ready 的 condition 的 `status` 值为 "True" 则说明 bob 到 alice 授权建立成功。 + 注意:如果节点之间的入口网络存在网关时,为了确保节点与节点之间通信正常,需要网关符合一些要求,详情请参考[这里](./networkrequirements.md) ### 运行任务 @@ -233,13 +240,13 @@ docker exec -it ${USER}-kuscia-master scripts/deploy/create_domaindata_bob_table 登录到安装 alice 的机器上,为 alice 的测试数据创建 domaindatagrant ```bash -docker exec -it ${USER}-kuscia-lite-alice curl https://127.0.0.1:8070/api/v1/datamesh/domaindatagrant/create -X POST -H 'content-type: application/json' -d '{"author":"alice","domaindata_id":"alice-table","grant_domain":"bob"}' --cacert var/tmp/ca.crt --cert var/tmp/ca.crt --key var/tmp/ca.key +docker exec -it ${USER}-kuscia-lite-alice curl https://127.0.0.1:8070/api/v1/datamesh/domaindatagrant/create -X POST -H 'content-type: application/json' -d '{"author":"alice","domaindata_id":"alice-table","grant_domain":"bob"}' --cacert var/certs/ca.crt --cert var/certs/ca.crt --key var/certs/ca.key ``` 同理,登录到安装 bob 的机器上,为 bob 的测试数据创建 domaindatagrant ```bash -docker exec -it ${USER}-kuscia-lite-bob curl https://127.0.0.1:8070/api/v1/datamesh/domaindatagrant/create -X POST -H 'content-type: application/json' -d '{"author":"bob","domaindata_id":"bob-table","grant_domain":"alice"}' --cacert var/tmp/ca.crt --cert var/tmp/ca.crt --key var/tmp/ca.key +docker exec -it ${USER}-kuscia-lite-bob curl https://127.0.0.1:8070/api/v1/datamesh/domaindatagrant/create -X POST -H 'content-type: application/json' -d '{"author":"bob","domaindata_id":"bob-table","grant_domain":"alice"}' --cacert var/certs/ca.crt --cert var/certs/ca.crt --key var/certs/ca.key ``` #### 执行测试作业 diff --git a/docs/deployment/deploy_p2p_cn.md b/docs/deployment/deploy_p2p_cn.md index 4f8e7faf..d1d96a5c 100644 --- a/docs/deployment/deploy_p2p_cn.md +++ b/docs/deployment/deploy_p2p_cn.md @@ -35,7 +35,9 @@ docker run --rm --pull always $KUSCIA_IMAGE cat /home/kuscia/scripts/deploy/depl # -k 参数传递的是节点容器 KusciaAPI 映射到主机的 MTLS 端口,保证和主机上现有的端口不冲突即可 ./deploy.sh autonomy -n alice -i 1.1.1.1 -p 11080 -k 8082 ``` -注意:节点 id 需要符合 DNS 子域名规则要求,详情请参考[这里](https://kubernetes.io/zh-cn/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names) +注意:
+1、如果节点之间的入口网络存在网关时,为了确保节点与节点之间通信正常,需要网关符合一些要求,详情请参考[这里](./networkrequirements.md)
+2、alice、bob 节点默认使用 sqlite 作为存储,生产部署时需要配置连接到 mysql 数据库的连接串,具体配置可以参考[这里](./kuscia_config_cn.md#id3)
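+配置完成后,可以用类似下面的方式简单验证 mysql 存储是否生效(仅为示意:主机、端口、账号与库名均为占位值;kuscia 连接 mysql 成功后,库中会生成 kine 表并写入数据,详见 K8s 部署文档中“检查数据库连接状态”一节):
+```bash
+# 示意:确认 kuscia 已在 mysql 中建出 kine 表并写入数据(参数均为占位示例)
+mysql -h 1.1.1.1 -P 3306 -u kuscia_user -p -e 'SHOW TABLES FROM kuscia; SELECT COUNT(*) FROM kuscia.kine;'
+```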
### 部署 bob 节点 @@ -56,7 +58,7 @@ docker run --rm --pull always $KUSCIA_IMAGE cat /home/kuscia/scripts/deploy/depl ```bash # [alice 机器] 将 domain.crt 从容器内部拷贝出来 -docker cp ${USER}-kuscia-autonomy-alice:/home/kuscia/var/tmp/domain.crt . +docker cp ${USER}-kuscia-autonomy-alice:/home/kuscia/var/certs/domain.crt . ``` @@ -85,11 +87,13 @@ alice 建立到 bob 的通信: docker exec -it ${USER}-kuscia-autonomy-alice scripts/deploy/join_to_host.sh alice bob https://2.2.2.2:21080 ``` -执行以下命令,查看是否有内容,如果有说明 alice 到 bob 授权建立成功。 +执行以下命令: ```bash -docker exec -it ${USER}-kuscia-autonomy-alice kubectl get cdr alice-bob -o=jsonpath="{.status.tokenStatus.sourceTokens[*]}" +docker exec -it ${USER}-kuscia-autonomy-alice kubectl get cdr alice-bob -o yaml ``` +当 `type` 为 Ready 的 condition 的 `status` 值为 "True" 则说明 alice 到 bob 授权建立成功。 + 注意:如果节点之间的入口网络存在网关时,为了确保节点与节点之间通信正常,需要网关符合一些要求,详情请参考[这里](./networkrequirements.md) #### 创建 bob 到 alice 的授权 @@ -102,7 +106,7 @@ docker exec -it ${USER}-kuscia-autonomy-alice kubectl get cdr alice-bob -o=jsonp ```bash # [bob 机器] 将 domain.crt 从容器内部拷贝出来 -docker cp ${USER}-kuscia-autonomy-bob:/home/kuscia/var/tmp/domain.crt . +docker cp ${USER}-kuscia-autonomy-bob:/home/kuscia/var/certs/domain.crt . ``` 将 bob 的公钥拷贝到 alice 的机器上的 ${PWD}/kuscia-autonomy-alice-certs 目录中并重命名为 bob.domain.crt: @@ -131,11 +135,13 @@ bob 建立到 alice 的通信: docker exec -it ${USER}-kuscia-autonomy-bob scripts/deploy/join_to_host.sh bob alice https://1.1.1.1:11080 ``` -执行以下命令,查看是否有内容,如果有说明 bob 到 alice 授权建立成功。 +执行以下命令: ```bash -docker exec -it ${USER}-kuscia-autonomy-bob kubectl get cdr bob-alice -o=jsonpath="{.status.tokenStatus.sourceTokens[*]}" +docker exec -it ${USER}-kuscia-autonomy-bob kubectl get cdr bob-alice -o yaml ``` +当 `type` 为 Ready 的 condition 的 `status` 值为 "True" 则说明 bob 到 alice 授权建立成功。 + 注意:如果节点之间的入口网络存在网关时,为了确保节点与节点之间通信正常,需要网关符合一些要求,详情请参考[这里](./networkrequirements.md) #### 准备测试数据 @@ -151,7 +157,7 @@ docker exec -it ${USER}-kuscia-autonomy-alice scripts/deploy/create_domaindata_a 为 alice 的测试数据创建 domaindatagrant ```bash -docker exec -it ${USER}-kuscia-autonomy-alice curl https://127.0.0.1:8070/api/v1/datamesh/domaindatagrant/create -X POST -H 'content-type: application/json' -d '{"author":"alice","domaindata_id":"alice-table","grant_domain":"bob"}' --cacert var/tmp/ca.crt --cert var/tmp/ca.crt --key var/tmp/ca.key +docker exec -it ${USER}-kuscia-autonomy-alice curl https://127.0.0.1:8070/api/v1/datamesh/domaindatagrant/create -X POST -H 'content-type: application/json' -d '{"author":"alice","domaindata_id":"alice-table","grant_domain":"bob"}' --cacert var/certs/ca.crt --cert var/certs/ca.crt --key var/certs/ca.key ``` 同理,登录到安装 bob 的机器上,将默认的测试数据拷贝到之前部署目录的 kuscia-autonomy-bob-data 下 @@ -166,7 +172,7 @@ docker exec -it ${USER}-kuscia-autonomy-bob scripts/deploy/create_domaindata_bob 为 bob 的测试数据创建 domaindatagrant ```bash -docker exec -it ${USER}-kuscia-autonomy-bob curl https://127.0.0.1:8070/api/v1/datamesh/domaindatagrant/create -X POST -H 'content-type: application/json' -d '{"author":"bob","domaindata_id":"bob-table","grant_domain":"alice"}' --cacert var/tmp/ca.crt --cert var/tmp/ca.crt --key var/tmp/ca.key +docker exec -it ${USER}-kuscia-autonomy-bob curl https://127.0.0.1:8070/api/v1/datamesh/domaindatagrant/create -X POST -H 'content-type: application/json' -d '{"author":"bob","domaindata_id":"bob-table","grant_domain":"alice"}' --cacert var/certs/ca.crt --cert var/certs/ca.crt --key var/certs/ca.key ``` #### 执行作业 diff --git a/docs/deployment/index.rst b/docs/deployment/index.rst index 8e8cff67..d6c555ff 100644 
--- a/docs/deployment/index.rst +++ b/docs/deployment/index.rst @@ -6,6 +6,7 @@ deploy_p2p_cn deploy_master_lite_cn + K8s_deployment_kuscia/index operation_cn networkrequirements logdescription diff --git a/docs/deployment/kuscia_config_cn.md b/docs/deployment/kuscia_config_cn.md index 96b2d729..7077e7fd 100644 --- a/docs/deployment/kuscia_config_cn.md +++ b/docs/deployment/kuscia_config_cn.md @@ -13,7 +13,9 @@ Kuscia的配置文件由公共配置和每个模式的特殊配置组成, 具 mode: Lite # 节点ID domainID: alice -# 节点私钥配置, 用于节点间的通信认证, 节点应用的证书签发, 经过 base64 编码。 +# 节点私钥配置, 用于节点间的通信认证, 节点应用的证书签发 +# 注意: 目前节点私钥仅支持 pkcs#1 格式的: "BEGIN RSA PRIVATE KEY/END RSA PRIVATE KEY" +# 执行命令 "docker run -it --rm secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia scripts/deploy/generate_rsa_key.sh" 生成私钥 domainKeyData: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNRDhDQVFBQ0NRREdsY1Y3MTd5V3l3SURBUUFCQWdrQXR5RGVueG0wUGVFQ0JRRHJVTGUvQWdVQTJBcUQ5UUlFCmFuYkxtd0lFZWFaYUxRSUZBSjZ1S2tjPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo # 日志级别 INFO、DEBUG、WARN logLevel: INFO @@ -68,7 +70,7 @@ datastoreEndpoint: "" ### 配置项详解 - `mode`: 当前 Kuscia 节点部署模式 支持 Lite、Master、Autonomy(不区分大小写), 不同部署模式详情请参考[这里](../reference/architecture_cn) - `domainID`: 当前 Kuscia 实例的 [节点 ID](../reference/concepts/domain_cn), 需要符合 DNS 子域名规则要求,详情请参考[这里](https://kubernetes.io/zh-cn/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names) -- `domainKeyData`: 节点私钥配置, 用于节点间的通信认证, 节点应用的证书签发, 经过 base64 编码。 可以通过命令 `openssl genrsa 2048 | base64` 生成 +- `domainKeyData`: 节点私钥配置, 用于节点间的通信认证, 节点应用的证书签发, 经过 base64 编码。 可以通过命令 `docker run -it --rm secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia scripts/deploy/generate_rsa_key.sh` 生成 - `logLevel`: 日志级别 INFO、DEBUG、WARN,默认 INFO - `liteDeployToken`: 节点连接 master 的部署 token,用于节点向 master 注册证书, 只在节点第一次向 master 注册证书时有效,详情请参考[节点中心化部署](./deploy_master_lite_cn) - `masterEndpoint`: 节点连接 master 的地址,比如 https://172.18.0.2:1080 @@ -101,8 +103,8 @@ datastoreEndpoint: "" 如果使用 [start_standalone.sh](https://github.com/secretflow/kuscia/blob/main/scripts/deploy/start_standalone.sh) 或者 [deploy.sh](https://github.com/secretflow/kuscia/blob/main/scripts/deploy/deploy.sh) 脚本部署的 kuscia,kuscia.yaml 文件路径默认是在以下位置(其他部署模式可以借鉴)。 - 宿主机路径: - master:\$HOME/kuscia/\${USER}-kuscia-master/kuscia.yaml - - lite:\$HOME/kuscia/\${USER}-kuscia-lite-${domainID}/kuscia.yaml - - autonomy:\$HOME/kuscia/\${USER}-kuscia-autonomy-${domainID}/kuscia.yaml + - lite:\$HOME/kuscia/\${USER}-kuscia-lite-\${domainID}/kuscia.yaml + - autonomy:\$HOME/kuscia/\${USER}-kuscia-autonomy-\${domainID}/kuscia.yaml - 容器内路径:/home/kuscia/etc/conf/kuscia.yaml 宿主机路径下修改 kuscia.yaml 配置后,重启容器 `docker restart ${container_name}` 生效。 \ No newline at end of file diff --git a/docs/deployment/logdescription.md b/docs/deployment/logdescription.md index c5b734cb..a6df9f04 100644 --- a/docs/deployment/logdescription.md +++ b/docs/deployment/logdescription.md @@ -57,26 +57,26 @@ internal.log 日志格式如下: ```bash -%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(Kuscia-Source)% %REQ(Kuscia-Host?:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION% %DYNAMIC_METADATA(envoy.kuscia:request_body)% %DYNAMIC_METADATA(envoy.kuscia:response_body)% +%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(Kuscia-Source)% %REQ(Kuscia-Host?:authority)% \"%REQ(:METHOD)% 
%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %RESPONSE_FLAGS% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION% %DYNAMIC_METADATA(envoy.kuscia:request_body)% %DYNAMIC_METADATA(envoy.kuscia:response_body)% ``` ```bash # 示例如下: -10.88.0.87 - [23/Oct/2023:01:58:02 +0000] alice fgew-cwqearkz-node-4-0-fed.bob.svc "POST /org.interconnection.link.ReceiverService/Push HTTP/1.1" 743d0da7e6814c2e 743d0da7e6814c2e 200 1791 0 0 0 0 - - -10.88.0.87 - [23/Oct/2023:01:58:02 +0000] alice fgew-cwqearkz-node-4-0-fed.bob.svc "POST /org.interconnection.link.ReceiverService/Push HTTP/1.1" b2f636af87a047f8 b2f636af87a047f8 200 56 0 0 0 0 - - -10.88.0.87 - [23/Oct/2023:01:58:03 +0000] alice fgew-cwqearkz-node-4-0-fed.bob.svc "POST /org.interconnection.link.ReceiverService/Push HTTP/1.1" fdd0c66dfb0fbe45 fdd0c66dfb0fbe45 200 56 0 0 0 0 - - -10.88.0.87 - [23/Oct/2023:01:58:03 +0000] alice fgew-cwqearkz-node-4-0-fed.bob.svc "POST /org.interconnection.link.ReceiverService/Push HTTP/1.1" dc52437872f6e051 dc52437872f6e051 200 171 0 0 0 0 - - +1.2.3.4 - [23/Oct/2023:01:58:02 +0000] alice fgew-cwqearkz-node-4-0-fed.bob.svc "POST /org.interconnection.link.ReceiverService/Push HTTP/1.1" 743d0da7e6814c2e 743d0da7e6814c2e 200 - 1791 0 0 0 0 - - +1.2.3.4 - [23/Oct/2023:01:58:02 +0000] alice fgew-cwqearkz-node-4-0-fed.bob.svc "POST /org.interconnection.link.ReceiverService/Push HTTP/1.1" b2f636af87a047f8 b2f636af87a047f8 200 - 56 0 0 0 0 - - +1.2.3.4 - [23/Oct/2023:01:58:03 +0000] alice fgew-cwqearkz-node-4-0-fed.bob.svc "POST /org.interconnection.link.ReceiverService/Push HTTP/1.1" fdd0c66dfb0fbe45 fdd0c66dfb0fbe45 200 - 56 0 0 0 0 - - +1.2.3.4 - [23/Oct/2023:01:58:03 +0000] alice fgew-cwqearkz-node-4-0-fed.bob.svc "POST /org.interconnection.link.ReceiverService/Push HTTP/1.1" dc52437872f6e051 dc52437872f6e051 200 - 171 0 0 0 0 - - ``` external.log 日志格式如下: ```bash -%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(Kuscia-Source)% %REQ(Kuscia-Host?:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %REQ(content-length)% %DURATION% %DYNAMIC_METADATA(envoy.kuscia:request_body)% %DYNAMIC_METADATA(envoy.kuscia:response_body)% +%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(Kuscia-Source)% %REQ(Kuscia-Host?:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %RESPONSE_FLAGS% %REQ(content-length)% %DURATION% %DYNAMIC_METADATA(envoy.kuscia:request_body)% %DYNAMIC_METADATA(envoy.kuscia:response_body)% ``` ```bash -192.168.128.4 - [23/Oct/2023:04:36:51 +0000] bob kuscia-handshake.alice.svc "GET /handshake HTTP/1.1" 01e87a178e05f967 01e87a178e05f967 200 - 0 - - -192.168.128.6 - [23/Oct/2023:04:36:53 +0000] tee kuscia-handshake.alice.svc "GET /handshake HTTP/1.1" 65a07630561d3814 65a07630561d3814 200 - 0 - - -192.168.128.4 - [23/Oct/2023:04:37:06 +0000] bob kuscia-handshake.alice.svc "GET /handshake HTTP/1.1" 8537c88b929fee67 8537c88b929fee67 200 - 0 - - -192.168.128.6 - [23/Oct/2023:04:37:08 +0000] tee kuscia-handshake.alice.svc "GET /handshake HTTP/1.1" 875d64696b98c6fa 875d64696b98c6fa 200 - 0 - - -``` \ No newline at end of file +1.2.3.4 - [23/Oct/2023:04:36:51 +0000] bob kuscia-handshake.alice.svc "GET /handshake HTTP/1.1" 01e87a178e05f967 01e87a178e05f967 200 - - 0 - - +1.2.3.4 - [23/Oct/2023:04:36:53 
+0000] tee kuscia-handshake.alice.svc "GET /handshake HTTP/1.1" 65a07630561d3814 65a07630561d3814 200 - - 0 - - +1.2.3.4 - [23/Oct/2023:04:37:06 +0000] bob kuscia-handshake.alice.svc "GET /handshake HTTP/1.1" 8537c88b929fee67 8537c88b929fee67 200 - - 0 - - +1.2.3.4 - [23/Oct/2023:04:37:08 +0000] tee kuscia-handshake.alice.svc "GET /handshake HTTP/1.1" 875d64696b98c6fa 875d64696b98c6fa 200 - - 0 - - +``` diff --git a/docs/development/register_custom_image.md b/docs/development/register_custom_image.md index cc409a9c..cac60cee 100644 --- a/docs/development/register_custom_image.md +++ b/docs/development/register_custom_image.md @@ -47,7 +47,7 @@ docker cp ${USER}-kuscia-autonomy-alice:/home/kuscia/scripts/tools/register_app_ - `{{IMAGE_NAME}}`: 自定义算法镜像名称 - `{{IMAGE_TAG}}`: 自定义算法镜像标签 -若需注册非 Secretflow 算法镜像,请先准备自定义算法镜像的 AppImage 模版文件,该文件命名规则为`{算法镜像对应的 Kuscia AppImage 名称}-image.yaml`。 +若需注册非 Secretflow 算法镜像,请先准备自定义算法镜像的 AppImage 模版文件,该文件命名规则为`{算法镜像对应的 Kuscia AppImage 名称}.yaml`。 其中符号`{}`中的内容需和`register_app_image.sh`脚本`-n` Flag 指定的参数值一致。 diff --git a/docs/index.md b/docs/index.md index 575364fe..3010c039 100644 --- a/docs/index.md +++ b/docs/index.md @@ -53,6 +53,7 @@ Kuscia(Kubernetes-based Secure Collaborative InfrA)是一款基于 K3s 的 - [指南:多机器部署中心化集群][deploy-master-lite] - [指南:多机器部署点对点集群][deploy-p2p] +- [指南:k8s 集群部署 kuscia][deploy-k8s-deploy-kuscia] - [常见运维操作][ops-cheatsheet] - [网络要求][deploy-networkrequirements] - [日志说明][deploy-logdescription] @@ -60,11 +61,11 @@ Kuscia(Kubernetes-based Secure Collaborative InfrA)是一款基于 K3s 的 [deploy-master-lite]: ./deployment/deploy_master_lite_cn.md [deploy-p2p]: ./deployment/deploy_p2p_cn.md +[deploy-k8s-deploy-kuscia]: ./deployment/K8s_deployment_kuscia/index.rst [ops-cheatsheet]: ./deployment/operation_cn.md [deploy-networkrequirements]: ./deployment/networkrequirements.md [deploy-logdescription]: ./deployment/logdescription.md [deploy-kuscia_config_cn]: ./deployment/kuscia_config_cn.md - ## 更多指南 - [如何运行一个互联互通银联 BFIA 协议作业][how-to-bfia] diff --git a/docs/reference/apis/kusciajob_cn.md b/docs/reference/apis/kusciajob_cn.md index d6e5fc1e..45b9cd6e 100644 --- a/docs/reference/apis/kusciajob_cn.md +++ b/docs/reference/apis/kusciajob_cn.md @@ -198,11 +198,12 @@ protobuf 文件。 ### PartyStatus -| 字段 | 类型 | 选填 | 描述 | -|-----------|--------------------------|----|-------| -| domain_id | string | 必填 | 节点 ID | -| state | [TaskState](#task-state) | 必填 | 总体状态 | -| err_msg | string | 可选 | 错误信息 | +| 字段 | 类型 | 选填 | 描述 | +|-----------|-------------------------------------------|----|---------------| +| domain_id | string | 必填 | 节点 ID | +| state | [TaskState](#task-state) | 必填 | 总体状态 | +| err_msg | string | 可选 | 错误信息 | +| endpoints | [JobPartyEndpoint](#job-party-endpoint)[] | 必填 | 应用对外暴露的访问地址信息 | {#task} @@ -267,3 +268,13 @@ protobuf 文件。 | Running | 1 | 运行中 | | Succeeded | 2 | 成功 | | Failed | 3 | 失败 | + +{#job-party-endpoint} + +### JobPartyEndpoint + +| 字段 | 类型 | 选填 | 描述 | +|-----------|--------|---|-----------------------------------------------------------------------------------------------------| +| port_name | string | 必填 | 应用服务端口名称,详细解释请参考[AppImage](../concepts/appimage_cn.md) `deployTemplates.spec.containers.ports.name` | +| scope | string | 必填 | 应用服务使用范围,详细解释请参考[AppImage](../concepts/appimage_cn.md) `deployTemplates.spec.containers.ports.scope` | +| endpoint | string | 必填 | 应用服务访问地址 | \ No newline at end of file diff --git a/docs/reference/apis/serving_cn.md b/docs/reference/apis/serving_cn.md index 78960340..e91f0d2d 100644 --- a/docs/reference/apis/serving_cn.md +++ 
b/docs/reference/apis/serving_cn.md @@ -169,26 +169,28 @@ ### PartyServingStatus -| 字段 | 类型 | 选填 | 描述 | -|----------------------|---------------------------------|----|------------| -| domain_id | string | 必填 | 节点ID | -| role | string | 可选 | 角色 | -| state | string | 必填 | 状态 | -| replicas | int32 | 必填 | 应用副本总数 | -| available_replicas | int32 | 必填 | 应用可用副本数 | -| unavailable_replicas | int32 | 必填 | 应用不可用副本数 | -| updatedReplicas | int32 | 必填 | 最新版本的应用副本数 | -| create_time | string | 必填 | 创建时间 | -| endpoints | [Endpoint](#serving-endpoint)[] | 必填 | 应用访问地址列表 | - - -{#serving-endpoint} - -### Endpoint - -| 字段 | 类型 | 选填 | 描述 | -|----------|--------|----|--------| -| endpoint | string | 否 | 应用访问地址 | +| 字段 | 类型 | 选填 | 描述 | +|----------------------|---------------------------------------------------|----|---------------| +| domain_id | string | 必填 | 节点ID | +| role | string | 可选 | 角色 | +| state | string | 必填 | 状态 | +| replicas | int32 | 必填 | 应用副本总数 | +| available_replicas | int32 | 必填 | 应用可用副本数 | +| unavailable_replicas | int32 | 必填 | 应用不可用副本数 | +| updatedReplicas | int32 | 必填 | 最新版本的应用副本数 | +| create_time | string | 必填 | 创建时间 | +| endpoints | [ServingPartyEndpoint](#serving-party-endpoint)[] | 必填 | 应用对外暴露的访问地址信息 | + + +{#serving-party-endpoint} + +### ServingPartyEndpoint + +| 字段 | 类型 | 选填 | 描述 | +|-----------|--------|---|-------------------------------------------------------------------------------------------------------| +| port_name | string | 必填 | 应用服务端口名称,详细解释请参考[AppImage](../concepts/appimage_cn.md) `deployTemplates.spec.containers.ports.name` | +| scope | string | 必填 | 应用服务使用范围, 详细解释请参考[AppImage](../concepts/appimage_cn.md) `deployTemplates.spec.containers.ports.scope` | +| endpoint | string | 必填 | 应用服务访问地址 | {#serving-party} diff --git a/docs/reference/apis/summary_cn.md b/docs/reference/apis/summary_cn.md index 16eaade3..06e53099 100644 --- a/docs/reference/apis/summary_cn.md +++ b/docs/reference/apis/summary_cn.md @@ -69,10 +69,10 @@ Status 携带请求响应的状态信息。 Kuscia master 部署完成之后,会默认生成一个 kuscia API server 证书,你可以通过以下命令获取(以中心化组网模式为例): ```shell -docker cp ${USER}-kuscia-master:/home/kuscia/var/tmp/kusciaapi-server.key . -docker cp ${USER}-kuscia-master:/home/kuscia/var/tmp/kusciaapi-server.crt . -docker cp ${USER}-kuscia-master:/home/kuscia/var/tmp/ca.crt . -docker cp ${USER}-kuscia-master:/home/kuscia/var/tmp/token . +docker cp ${USER}-kuscia-master:/home/kuscia/var/certs/kusciaapi-server.key . +docker cp ${USER}-kuscia-master:/home/kuscia/var/certs/kusciaapi-server.crt . +docker cp ${USER}-kuscia-master:/home/kuscia/var/certs/ca.crt . +docker cp ${USER}-kuscia-master:/home/kuscia/var/certs/token . 
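+# 提示:上面拷贝出的 token 文件内容,即为下文示例中 Token 请求头({token})需要填入的值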
``` ### GRPC @@ -120,9 +120,9 @@ def query_domain(): 你也可以使用 GRPC 的客户端工具连接上 Kuscia API,如 [grpcurl](https://github.com/fullstorydev/grpcurl/releases),你需要替换 {} 中的内容: > 如果 GRPC 的主机端口是 8083 ,则可以执行下面的命令,端口号不是 8083 ,可以先用 `docker inspect --format="{{json .NetworkSettings.Ports}}" ${容器名}` 命令检查下端口 ```shell -grpcurl --cert /home/kuscia/var/tmp/kusciaapi-server.crt \ - --key /home/kuscia/var/tmp/kusciaapi-server.key \ - --cacert /home/kuscia/var/tmp/ca.crt \ +grpcurl --cert /home/kuscia/var/certs/kusciaapi-server.crt \ + --key /home/kuscia/var/certs/kusciaapi-server.key \ + --cacert /home/kuscia/var/certs/ca.crt \ -H 'Token: {token}' \ -d '{"domain_id": "alice"}' \ ${USER}-kuscia-master:8083 kuscia.proto.api.v1alpha1.kusciaapi.DomainService.QueryDomain @@ -143,9 +143,9 @@ GRPC 主机上端口:master 或者 autonomy 可以通过 `docker inspect --for 你也可以使用 HTTP 的客户端工具连接上 Kuscia API,如 curl,你需要替换 {} 中的内容: > 如果 GRPC 的主机端口是 8082 ,则可以执行下面的命令,端口号不是 8082 ,可以先用 `docker inspect --format="{{json .NetworkSettings.Ports}}" ${容器名}` 命令检查下端口 ```shell -curl --cert /home/kuscia/var/tmp/kusciaapi-server.crt \ - --key /home/kuscia/var/tmp/kusciaapi-server.key \ - --cacert /home/kuscia/var/tmp/ca.crt \ +curl --cert /home/kuscia/var/certs/kusciaapi-server.crt \ + --key /home/kuscia/var/certs/kusciaapi-server.key \ + --cacert /home/kuscia/var/certs/ca.crt \ --header 'Token: {token}' --header 'Content-Type: application/json' \ 'https://{{USER}-kuscia-master}:8082/api/v1/domain/query' \ -d '{"domain_id": "alice"}' diff --git a/docs/reference/concepts/domaindata_cn.md b/docs/reference/concepts/domaindata_cn.md index 5b81dbe1..abad8741 100644 --- a/docs/reference/concepts/domaindata_cn.md +++ b/docs/reference/concepts/domaindata_cn.md @@ -164,7 +164,7 @@ Data Mesh API 提供 HTTP 和 GRPC 两种访问方法,分别位于 8070 和 80 ```shell docker exec -it ${USER}-kuscia-lite-alice curl -X POST 'https://127.0.0.1:8070/api/v1/datamesh/domaindata/query' --header 'Content-Type: application/json' -d '{ "domaindata_id": "alice-table" -}' --cacert /home/kuscia/var/tmp/ca.crt --cert /home/kuscia/var/tmp/ca.crt --key /home/kuscia/var/tmp/ca.key +}' --cacert /home/kuscia/var/certs/ca.crt --cert /home/kuscia/var/certs/ca.crt --key /home/kuscia/var/certs/ca.key ``` diff --git a/docs/reference/concepts/domainroute_cn.md b/docs/reference/concepts/domainroute_cn.md index d235ae50..4c052f21 100644 --- a/docs/reference/concepts/domainroute_cn.md +++ b/docs/reference/concepts/domainroute_cn.md @@ -33,10 +33,10 @@ spec: 在示例中 * `.metadata.name`:表示路由规则的名称。 * `.metadata.namespace`:表示路由规则所在的命名空间,这里是 Master 的 Namespace。 -* `.spec.authenticationType`:表示节点到 Master 的身份认证方式,目前仅支持 MTLS 和 None(表示不校验)。 +* `.spec.authenticationType`:表示节点到目标节点的身份认证方式,目前仅支持 TOKEN 、MTLS 和 None(表示不校验)。 * `.spec.source`:表示源节点的 Namespace,这里即 Lite 节点的 Namespace。 * `.spec.destination`:表示目标节点的 Namespace,这里即 Master 的命名空间。 -* `.spec.requestHeadersToAdd`:表示 Master 侧的 Envoy 在转发源节点的请求时添加的 headers,示例中 key 为 +* `.spec.requestHeadersToAdd`:表示目标节点侧的 Envoy 在转发源节点的请求时添加的 headers,示例中 key 为 Authorization 的 header 是 Master 为 alice 分配访问 k3s 的令牌。 你可以通过 kubectl 命令来创建、修改、查看、删除 DomainRoute。 @@ -254,6 +254,8 @@ DomainRoute `spec` 的子字段详细介绍如下: DomainRoute `status` 的子字段详细介绍如下: +* `isDestinationUnreachable`:表示 到目标节点是否是不可达的。 +* `isDestinationAuthorized`:表示 和目标节点是否已经握手成功。 * `tokenStatus`:表示 Token 认证方式下,源节点和目标节点协商的 Token 的信息。 * `revisionInitializer`:表示源节点中发起 Token 协商的实例。 * `revisionToken`:表示最新版本的 Token。 @@ -265,6 +267,8 @@ DomainRoute `status` 的子字段详细介绍如下: * `tokens[].revision`:表示 Token 的版本。 * `tokens[].revisionTime`:表示 Token 时间戳。 * `tokens[].token`:表示 BASE64 
编码格式的经过节点公钥加密的 Token。 + * `tokens[].isReady`:表示 Token 是否生效。 + * `tokens[].expirationTime`:表示 Token 何时过期。 ### ClusterDomainRoute-template diff --git a/docs/reference/concepts/kusciatask_cn.md b/docs/reference/concepts/kusciatask_cn.md index 3aaa85d7..cbff917b 100644 --- a/docs/reference/concepts/kusciatask_cn.md +++ b/docs/reference/concepts/kusciatask_cn.md @@ -128,37 +128,55 @@ kubectl get kt secretflow-task-psi -o jsonpath={.status} | jq "alice/secretflow-task-psi-0-fed": { "createTime": "2023-08-21T07:43:15Z", "namespace": "alice", + "portName": "fed", + "portNumber": 8080, "readyTime": "2023-08-21T07:43:18Z", + "scope": "Cluster", "serviceName": "secretflow-task-psi-0-fed" }, "alice/secretflow-task-psi-0-global": { "createTime": "2023-08-21T07:43:15Z", "namespace": "alice", + "portName": "global", + "portNumber": 8081, "readyTime": "2023-08-21T07:43:18Z", + "scope": "Domain", "serviceName": "secretflow-task-psi-0-global" }, "alice/secretflow-task-psi-0-spu": { "createTime": "2023-08-21T07:43:15Z", "namespace": "alice", + "portName": "spu", + "portNumber": 54509, "readyTime": "2023-08-21T07:43:18Z", + "scope": "Cluster", "serviceName": "secretflow-task-psi-0-spu" }, "bob/secretflow-task-psi-0-fed": { "createTime": "2023-08-21T07:43:15Z", "namespace": "bob", + "portName": "fed", + "portNumber": 8080, "readyTime": "2023-08-21T07:43:18Z", + "scope": "Cluster", "serviceName": "secretflow-task-psi-0-fed" }, "bob/secretflow-task-psi-0-global": { "createTime": "2023-08-21T07:43:15Z", "namespace": "bob", + "portName": "global", + "portNumber": 8081, "readyTime": "2023-08-21T07:43:18Z", + "scope": "Domain", "serviceName": "secretflow-task-psi-0-global" }, "bob/secretflow-task-psi-0-spu": { "createTime": "2023-08-21T07:43:15Z", "namespace": "bob", + "portName": "spu", + "portNumber": 54509, "readyTime": "2023-08-21T07:43:18Z", + "scope": "Cluster", "serviceName": "secretflow-task-psi-0-spu" } }, @@ -296,6 +314,55 @@ status: podName: task-template-psi-0 podPhase: Succeeded reason: Completed + serviceStatuses: + alice/secretflow-task-psi-0-fed: + createTime: "2023-08-21T07:43:15Z" + namespace: alice + portName: fed + portNumber: 8080 + readyTime: "2023-08-21T07:43:18Z" + scope: Cluster + serviceName: secretflow-task-psi-0-fed + alice/secretflow-task-psi-0-global: + createTime: "2023-08-21T07:43:15Z" + namespace: alice + portName: global + portNumber: 8081 + readyTime: "2023-08-21T07:43:18Z" + scope: Domain + serviceName: secretflow-task-psi-0-global + alice/secretflow-task-psi-0-spu: + createTime: "2023-08-21T07:43:15Z" + namespace: alice + portName: spu + portNumber: 54509 + readyTime: "2023-08-21T07:43:18Z" + scope: Cluster + serviceName: secretflow-task-psi-0-spu + bob/secretflow-task-psi-0-fed: + createTime: "2023-08-21T07:43:15Z" + namespace: bob + portName: fed + portNumber: 8080 + readyTime: "2023-08-21T07:43:18Z" + scope: Cluster + serviceName: secretflow-task-psi-0-fed + bob/secretflow-task-psi-0-global: + createTime: "2023-08-21T07:43:15Z" + namespace: bob + portName: global + portNumber: 8081 + readyTime: "2023-08-21T07:43:18Z" + scope: Domain + serviceName: secretflow-task-psi-0-global + bob/secretflow-task-psi-0-spu: + createTime: "2023-08-21T07:43:15Z" + namespace: bob + portName: spu + portNumber: 54509 + readyTime: "2023-08-21T07:43:18Z" + scope: Cluster + serviceName: secretflow-task-psi-0-spu startTime: "2023-06-26T03:46:38Z" ``` @@ -348,6 +415,14 @@ KusciaTask `status` 的子字段详细介绍如下: - `podStatuses[].reason`: 表示 Pod 处在该阶段的原因。 - `podStatuses[].message`: 表示 Pod 处在该阶段的详细描述信息。 - 
`podStatuses[].terminationLog`: 表示 Pod 异常终止时的日志信息。 +- `serviceStatuses`: 表示 KusciaTask 相关的所有参与方的 Service 状态信息。 + - `serviceStatuses[].createTime`: 表示 Service 的创建时间戳。 + - `serviceStatuses[].namespace`: 表示 Service 的所在的 Namespace。 + - `serviceStatuses[].portName`: 表示 Service 的端口名称。 + - `serviceStatuses[].portNumber`: 表示 Service 的端口号。 + - `serviceStatuses[].readyTime`: 表示 Service 就绪并可以对外提供服务的时间。 + - `serviceStatuses[].scope`: 表示 Service 的端口使用范围。 + - `serviceStatuses[].serviceName`: 表示 Service 的名称。 - `startTime`: 表示 KusciaTask 第一次被 Kuscia 控制器处理的时间戳。 - `completionTime`: 表示 KusciaTask 运行完成的时间戳。 - `lastReconcileTime`: 表示 KusciaTask 上次更新的时间戳。 diff --git a/docs/reference/troubleshoot/index.rst b/docs/reference/troubleshoot/index.rst index 80932521..8c20bc37 100644 --- a/docs/reference/troubleshoot/index.rst +++ b/docs/reference/troubleshoot/index.rst @@ -9,4 +9,5 @@ networkauthorizationcheck runjobfailed FATEdeployfailed - FATErunjobfailed \ No newline at end of file + FATErunjobfailed + userdefinedserviceroute \ No newline at end of file diff --git a/docs/reference/troubleshoot/userdefinedserviceroute.md b/docs/reference/troubleshoot/userdefinedserviceroute.md new file mode 100644 index 00000000..3b8585e6 --- /dev/null +++ b/docs/reference/troubleshoot/userdefinedserviceroute.md @@ -0,0 +1,55 @@ +# 用户自定义 Service 路由 + +## 说明 +通过复用 kuscia 提供的网络打平功能,可以在 alice 和 bob 中自定义 service,进行请求转发。 + +## 示例 +下面是Alice和Bob的管理平台之间需要进行通信示例: + +- 在Alice节点容器内,手动创建一个 ExternalName 类型的 Service, 其中 ExternalName 设置为 Alice 平台的地址,例如: +```bash +apiVersion: v1 +kind: Service +metadata: + name: alice-pad + namespace: alice +spec: + # 10.88.0.2为alice平台服务的地址 + externalName: 10.88.0.2 + ports: + - name: cluster + # 10010为 alice 平台服务的端口 + port: 10010 + type: ExternalName +status: + loadBalancer: {} +``` +内容 copy 到 alice-pad.yaml,执行 `kubectl create -f alice-pad.yaml` 创建 + +- 在 Bob 节点容器内,手动创建一个 ExternalName 类型的 Service, 其中 ExternalName 设置为 Bob 平台的地址,例如: +```bash +apiVersion: v1 +kind: Service +metadata: + name: bob-pad + namespace: bob +spec: + # 10.88.0.3为bob平台服务的地址 + externalName: 10.88.0.3 + ports: + - name: cluster + # 10010为 bob 平台服务的端口 + port: 10010 + type: ExternalName +status: + loadBalancer: {} +``` +内容 copy 到 bob-pad.yaml,执行 `kubectl create -f bob-pad.yaml` 创建 + +## 访问方法 +下面是Alice访问Bob侧平台的方法,反之类似: + +- 若在 Alice Docker 容器内,直接访问 Bob 平台的方式:`curl -v http://bob-pad.bob.svc` +- 若在 Alice Docker 容器外,那么需要把 Alice 节点的 80 端口暴露到宿主机上,然后通过 `curl -v http://127.0.0.1:{暴露在宿主机上的端口} -H "host:bob-pad.bob.svc"` + +> Tips:通过上述方式,将 Service 暴露出来后,虽然 Kuscia 做了安全性的防护(只有授权后的节点才能访问到该 Service),但是毕竟是内部服务暴露出来给其他机构,请注意服务自身的安全性加强,比如越权漏洞等。 \ No newline at end of file diff --git a/docs/tutorial/run_secretflow_with_api_cn.md b/docs/tutorial/run_secretflow_with_api_cn.md index f3e2bd8f..e6e35807 100644 --- a/docs/tutorial/run_secretflow_with_api_cn.md +++ b/docs/tutorial/run_secretflow_with_api_cn.md @@ -14,7 +14,7 @@ Kuscia API 使用双向 HTTPS,所以需要配置你的客户端库的双向 HT ### 中心化组网模式 -证书文件在 ${USER}-kuscia-master 节点的`/home/kuscia/var/tmp/`目录下: +证书文件在 ${USER}-kuscia-master 节点的`/home/kuscia/var/certs/`目录下: | 文件名 | 文件功能 | | -------------------- | ------------------------------------------------------- | @@ -27,7 +27,7 @@ Kuscia API 使用双向 HTTPS,所以需要配置你的客户端库的双向 HT 证书的配置参考[配置授权](../deployment/deploy_p2p_cn.md#配置授权) -这里以 alice 节点为例,接口需要的证书文件在 ${USER}-kuscia-autonomy-alice 节点的`/home/kuscia/var/tmp/`目录下: +这里以 alice 节点为例,接口需要的证书文件在 ${USER}-kuscia-autonomy-alice 节点的`/home/kuscia/var/certs/`目录下: | 文件名 | 文件功能 | | -------------------- | ------------------------------------------------------- 
| @@ -107,11 +107,11 @@ docker exec -it ${USER}-kuscia-autonomy-alice ```shell curl -k -X POST 'https://localhost:8082/api/v1/job/create' \ ---header "Token: $(cat /home/kuscia/var/tmp/token)" \ +--header "Token: $(cat /home/kuscia/var/certs/token)" \ --header 'Content-Type: application/json' \ ---cert '/home/kuscia/var/tmp/kusciaapi-server.crt' \ ---key '/home/kuscia/var/tmp/kusciaapi-server.key' \ ---cacert '/home/kuscia/var/tmp/ca.crt' \ +--cert '/home/kuscia/var/certs/kusciaapi-server.crt' \ +--key '/home/kuscia/var/certs/kusciaapi-server.key' \ +--cacert '/home/kuscia/var/certs/ca.crt' \ -d '{ "job_id": "job-best-effort-linear", "initiator": "alice", @@ -172,11 +172,11 @@ job-best-effort-linear 是你在[配置 Job](#configure-kuscia-job) 中指定的 ```shell curl -k -X POST 'https://localhost:8082/api/v1/job/status/batchQuery' \ ---header "Token: $(cat /home/kuscia/var/tmp/token)" \ +--header "Token: $(cat /home/kuscia/var/certs/token)" \ --header 'Content-Type: application/json' \ ---cert '/home/kuscia/var/tmp/kusciaapi-server.crt' \ ---key '/home/kuscia/var/tmp/kusciaapi-server.key' \ ---cacert '/home/kuscia/var/tmp/ca.crt' \ +--cert '/home/kuscia/var/certs/kusciaapi-server.crt' \ +--key '/home/kuscia/var/certs/kusciaapi-server.key' \ +--cacert '/home/kuscia/var/certs/ca.crt' \ -d '{ "job_ids": ["job-best-effort-linear"] }' @@ -213,12 +213,46 @@ curl -k -X POST 'https://localhost:8082/api/v1/job/status/batchQuery' \ { "domain_id": "alice", "state": "Succeeded", - "err_msg": "" + "err_msg": "", + "endpoints": [ + { + "port_name": "spu", + "scope": "Cluster", + "endpoint": "job-psi-0-spu.alice.svc" + }, + { + "port_name": "fed", + "scope": "Cluster", + "endpoint": "job-psi-0-fed.alice.svc" + }, + { + "port_name": "global", + "scope": "Domain", + "endpoint": "job-psi-0-global.alice.svc:8081" + } + ] }, { "domain_id": "bob", "state": "Succeeded", - "err_msg": "" + "err_msg": "", + "endpoints": [ + { + "port_name": "fed", + "scope": "Cluster", + "endpoint": "job-psi-0-fed.bob.svc" + }, + { + "port_name": "global", + "scope": "Domain", + "endpoint": "job-psi-0-global.bob.svc:8081" + }, + { + "port_name": "spu", + "scope": "Cluster", + "endpoint": "job-psi-0-spu.bob.svc" + } + ] } ] }, @@ -233,12 +267,46 @@ curl -k -X POST 'https://localhost:8082/api/v1/job/status/batchQuery' \ { "domain_id": "alice", "state": "Succeeded", - "err_msg": "" + "err_msg": "", + "endpoints": [ + { + "port_name": "spu", + "scope": "Cluster", + "endpoint": "job-split-0-spu.alice.svc" + }, + { + "port_name": "fed", + "scope": "Cluster", + "endpoint": "job-split-0-fed.alice.svc" + }, + { + "port_name": "global", + "scope": "Domain", + "endpoint": "job-split-0-global.alice.svc:8081" + } + ] }, { "domain_id": "bob", "state": "Succeeded", - "err_msg": "" + "err_msg": "", + "endpoints": [ + { + "port_name": "fed", + "scope": "Cluster", + "endpoint": "job-psi-0-fed.bob.svc" + }, + { + "port_name": "global", + "scope": "Domain", + "endpoint": "job-psi-0-global.bob.svc:8081" + }, + { + "port_name": "spu", + "scope": "Cluster", + "endpoint": "job-psi-0-spu.bob.svc" + } + ] } ] } @@ -262,11 +330,11 @@ KusciaJob. 
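The per-party `endpoints` entries shown in the batchQuery response above (each carrying `port_name`, `scope` and `endpoint`) can also be consumed programmatically. Below is a minimal Go sketch, not the official Kuscia SDK, that issues the same batchQuery call with the client certificate, key and Token from `/home/kuscia/var/certs/` used in the curl examples and prints the decoded response; the generic decoding, `InsecureSkipVerify` (mirroring curl's `-k`) and error handling are illustrative assumptions only.

```go
package main

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"strings"
)

func main() {
	// Client certificate and key, as used by the curl example above.
	cert, err := tls.LoadX509KeyPair(
		"/home/kuscia/var/certs/kusciaapi-server.crt",
		"/home/kuscia/var/certs/kusciaapi-server.key",
	)
	if err != nil {
		panic(err)
	}

	// Token header value, mirroring `--header "Token: $(cat /home/kuscia/var/certs/token)"`.
	token, err := os.ReadFile("/home/kuscia/var/certs/token")
	if err != nil {
		panic(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{cert},
				// Mirrors curl's -k flag; load ca.crt into RootCAs instead for production use.
				InsecureSkipVerify: true,
			},
		},
	}

	body := bytes.NewBufferString(`{"job_ids": ["job-best-effort-linear"]}`)
	req, err := http.NewRequest(http.MethodPost,
		"https://localhost:8082/api/v1/job/status/batchQuery", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Token", strings.TrimSpace(string(token)))
	req.Header.Set("Content-Type", "application/json")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode generically; the per-party "endpoints" lists appear inside the task party
	// objects exactly as in the sample response above.
	var out map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}
```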
```shell curl -k -X POST 'https://localhost:8082/api/v1/job/delete' \ ---header "Token: $(cat /home/kuscia/var/tmp/token)" \ +--header "Token: $(cat /home/kuscia/var/certs/token)" \ --header 'Content-Type: application/json' \ ---cert '/home/kuscia/var/tmp/kusciaapi-server.crt' \ ---key '/home/kuscia/var/tmp/kusciaapi-server.key' \ ---cacert '/home/kuscia/var/tmp/ca.crt' \ +--cert '/home/kuscia/var/certs/kusciaapi-server.crt' \ +--key '/home/kuscia/var/certs/kusciaapi-server.key' \ +--cacert '/home/kuscia/var/certs/ca.crt' \ -d '{ "job_id": "job-best-effort-linear" }' @@ -288,7 +356,7 @@ KusciaJob 的算子参数由 `taskInputConfig` 字段定义,对于不同的算 对于 secretflow ,请参考:[Secretflow 官网](https://www.secretflow.org.cn/)。 -{#http-server-error} +{#http-client-error} ## HTTP 客户端错误处理 diff --git a/etc/conf/domainroute/listeners/external_listeners.json.tmpl b/etc/conf/domainroute/listeners/external_listeners.json.tmpl index 2e5f177b..9dfdf243 100644 --- a/etc/conf/domainroute/listeners/external_listeners.json.tmpl +++ b/etc/conf/domainroute/listeners/external_listeners.json.tmpl @@ -130,7 +130,7 @@ "path": "{{.LogPrefix}}/zipkin.log", "log_format": { "text_format_source": { - "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" + "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %RESPONSE_FLAGS% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" } } } @@ -154,7 +154,7 @@ "path": "{{.LogPrefix}}/kubernetes.log", "log_format": { "text_format_source": { - "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" + "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %RESPONSE_FLAGS% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" } } } @@ -176,7 +176,7 @@ "path": "{{.LogPrefix}}/prometheus.log", "log_format": { "text_format_source": { - "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" + "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %RESPONSE_FLAGS% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" } } } @@ -201,7 +201,7 @@ "path": "{{.LogPrefix}}/external.log", "log_format": { "text_format_source": { - 
"inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(Kuscia-Source)% %REQ(Kuscia-Host?:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %REQ(content-length)% %DURATION% %DYNAMIC_METADATA(envoy.kuscia:request_body)% %DYNAMIC_METADATA(envoy.kuscia:response_body)%\n" + "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(Kuscia-Source)% %REQ(Kuscia-Host?:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %RESPONSE_FLAGS% %REQ(content-length)% %DURATION% %DYNAMIC_METADATA(envoy.kuscia:request_body)% %DYNAMIC_METADATA(envoy.kuscia:response_body)%\n" } } } diff --git a/etc/conf/domainroute/listeners/internal_listeners.json.tmpl b/etc/conf/domainroute/listeners/internal_listeners.json.tmpl index becb7518..7d3f3c6b 100644 --- a/etc/conf/domainroute/listeners/internal_listeners.json.tmpl +++ b/etc/conf/domainroute/listeners/internal_listeners.json.tmpl @@ -107,7 +107,7 @@ "path": "{{.LogPrefix}}/zipkin.log", "log_format": { "text_format_source": { - "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" + "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %RESPONSE_FLAGS% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" } } } @@ -131,7 +131,7 @@ "path": "{{.LogPrefix}}/kubernetes.log", "log_format": { "text_format_source": { - "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" + "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %RESPONSE_FLAGS% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" } } } @@ -153,7 +153,7 @@ "path": "{{.LogPrefix}}/prometheus.log", "log_format": { "text_format_source": { - "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" + "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %RESPONSE_FLAGS% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION%\n" } } } @@ -178,7 +178,7 @@ "path": 
"{{.LogPrefix}}/internal.log", "log_format": { "text_format_source": { - "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(Kuscia-Source)% %REQ(Kuscia-Host?:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION% %DYNAMIC_METADATA(envoy.kuscia:request_body)% %DYNAMIC_METADATA(envoy.kuscia:response_body)%\n" + "inline_string": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - [%START_TIME(%d/%b/%Y:%H:%M:%S %z)%] %REQ(Kuscia-Source)% %REQ(Kuscia-Host?:authority)% \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %REQ(x-b3-traceid)% %REQ(x-b3-spanid)% %RESPONSE_CODE% %RESPONSE_FLAGS% %REQ(content-length)% %DURATION% %REQUEST_DURATION% %RESPONSE_DURATION% %RESPONSE_TX_DURATION% %DYNAMIC_METADATA(envoy.kuscia:request_body)% %DYNAMIC_METADATA(envoy.kuscia:response_body)%\n" } } } diff --git a/etc/conf/kuscia.yaml b/etc/conf/kuscia.yaml new file mode 100644 index 00000000..e682d519 --- /dev/null +++ b/etc/conf/kuscia.yaml @@ -0,0 +1,100 @@ +############################################################################# +############ Common Configs ############ +############################################################################# +# Deploy mode +mode: Lite +# Domain ID +domainID: alice +# Domain RSA private key encoded with base64。 +domainKeyData: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNRDhDQVFBQ0NRREdsY1Y3MTd5V3l3SURBUUFCQWdrQXR5RGVueG0wUGVFQ0JRRHJVTGUvQWdVQTJBcUQ5UUlFCmFuYkxtd0lFZWFaYUxRSUZBSjZ1S2tjPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo +# Logging level, INFO、DEBUG、WARN +logLevel: INFO + +############################################################################# +############ Lite Configs ############ +############################################################################# +# The deploy token used by the lite connecting to the master. +liteDeployToken: LS0tLS1CRUdJTi +# The master endpoint the lite connecting to +masterEndpoint: https://172.18.0.2:1080 + +############################################################################# +############ Lite、Autonomy Configs ############ +############################################################################# +# runc or runk +runtime: runc +# Runk configs +runk: + # Organization's k8s namespace scheduling pods + namespace: "" + # Organization's k8s cluster dns config + dnsServers: + # The kubeconfig of the organization's k8s cluster, the default serviceaccount will be used. + kubeconfigFile: + +# The capacity of the domain that can be used to schedule app pods. +capacity: + cpu: #4 + memory: #8Gi + pods: #500 + storage: #100Gi + +# agent image configs +image: + pullPolicy: "" + defaultRegistry: "" + registries: + - name: "" + endpoint: "" + username: "" + password: "" + +############################################################################# +############ Master Configs ############ +############################################################################# +# Database dsn connection string +# Example: mysql://username:password@tcp(hostname:3306)/database-name +datastoreEndpoint: "" + +############################################################################# +############ SecretBackend Configs ############ +############################################################################# +# SecretBackend means the secret information hosting backend used by Kuscia. 
+# Currently, ConfManager and config loaders will use these backends. +# +# It is always required to provide at least one secret information hosting for ConfManager to use. +# If no secret information hosting backend is provided, a mem type backend named "default" will be added. +# +# The secret information hosting backend supports the following types: +# mem: +# The mem type secret information hosting backend will read the key-value structure from params.preset and store it in an in-memory map structure. +# You can read or write to it, but any changes made will be lost after Kuscia is restarted. +# This is only intended for demo purposes and should not be used in a production environment!!! +# rfile: +# The rfile type secret information hosting backend uses a local file for storage and encrypts the data using an encryption algorithm, currently supporting AES. +# This allows you to retain your modifications even when Kuscia is restarted. +# However, if you need to use the rfile type backend in a multi-replica environment, you should consider alternative backends. +# +#secretBackends: +# - name: default +# driver: mem +# params: +# preset: +# # you can add any kv you needed. +# someKey: someValue +# - name: rfile1 +# driver: rfile +# params: +# # the file path to store/load secret. required, no default. +# path: /home/kuscia/var/tmp/rfile1 +# cipher: +# type: aes +# aes: +# # key to encrypt/decrypt. +# key: thisismyaeskey + +# ConfManager will use the secret information hosting backend named "confManager.backend" as specified. +# If not specified, ConfManager will use the backend named "default". +# +#confManager: +# backend: default \ No newline at end of file diff --git a/hack/k8s/autonomy/configmap.yaml b/hack/k8s/autonomy/configmap.yaml new file mode 100644 index 00000000..f43480e9 --- /dev/null +++ b/hack/k8s/autonomy/configmap.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kuscia-autonomy-alice-cm + namespace: autonomy-alice +data: + kuscia.yaml: |- + # 启动模式 + mode: Autonomy + + # 节点ID + # 示例: domainID: alice + domainID: {{.DOMAIN_ID}} + # 节点私钥配置, 用于节点间的通信认证, 节点应用的证书签发 + # 注意: 目前节点私钥仅支持 pkcs#1 格式的: "BEGIN RSA PRIVATE KEY/END RSA PRIVATE KEY" + # 执行命令 "docker run -it --rm secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia scripts/deploy/generate_rsa_key.sh" 生成私钥 + domainKeyData: {{.DOMAIN_KEY_DATA}} + + # 日志级别 INFO、DEBUG、WARN + logLevel: INFO + + # runc or runk + runtime: runk + + runk: + # 任务调度到指定的机构 k8s namespace上 + namespace: autonomy-alice + # k8s 集群的 pod dns 配置, 用于解析节点的应用域名, dns 的地址为 pod service 地址, 此处以 "1.1.1.1" 为例 + dnsServers: + # - kuscia-dns-lb-server + - 1.1.1.1 + # k8s 集群的 kubeconfig, 不填默认 serviceaccount; 当前请不填,默认使用 serviceaccount + kubeconfigFile: + + # 节点的可调度容量, runc 不填会自动获取当前容器的系统资源, runk 模式下需要手动配置 + capacity: + cpu: 4 + memory: 4Gi + storage: 100Gi + + # agent 镜像配置, 使用私有仓库存储镜像时配置(默认无需配置) + image: + pullPolicy: #使用镜像仓库|使用本地 + defaultRegistry: "" + registries: + - name: "" + endpoint: "" + username: "" + password: "" + + ####### master节点配置 ######### + # 数据库连接串,不填默认使用 sqlite (dsnXXXX) dns:// + # 注意: database 名称暂不支持 "-" 特殊字符 + datastoreEndpoint: "mysql://user:password@tcp(host:3306)/database?charset=utf8mb4&parseTime=True&loc=Local" + # KusciaAPI 以及节点对外网关使用的通信协议, NOTLS/TLS/MTLS + protocol: NOTLS diff --git a/hack/k8s/autonomy/deployment.yaml b/hack/k8s/autonomy/deployment.yaml new file mode 100644 index 00000000..3ec8bbc5 --- /dev/null +++ b/hack/k8s/autonomy/deployment.yaml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment 
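The ConfigMap above (and the Lite and Master ConfigMaps later in this patch) leaves `{{.DOMAIN_ID}}` and `{{.DOMAIN_KEY_DATA}}` as Go template placeholders that must be filled before the manifest is applied. A minimal sketch using text/template is shown below; the output path and example values are assumptions, and any other templating tool works just as well.

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Parse the manifest that still contains the {{.DOMAIN_ID}} / {{.DOMAIN_KEY_DATA}} placeholders.
	tpl := template.Must(template.ParseFiles("hack/k8s/autonomy/configmap.yaml"))

	// Example values only; generate the real key with scripts/deploy/generate_rsa_key.sh.
	values := map[string]string{
		"DOMAIN_ID":       "alice",
		"DOMAIN_KEY_DATA": "<base64-encoded pkcs#1 RSA private key>",
	}

	out, err := os.Create("configmap.rendered.yaml")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// The rendered file can then be applied with: kubectl apply -f configmap.rendered.yaml
	if err := tpl.Execute(out, values); err != nil {
		panic(err)
	}
}
```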
+metadata: + name: kuscia-autonomy-alice + namespace: autonomy-alice +spec: + replicas: 2 + selector: + matchLabels: + app: kuscia-autonomy-alice + template: + metadata: + labels: + app: kuscia-autonomy-alice + spec: + containers: + - command: + - kuscia + - start + - -c + - etc/conf/kuscia.yaml + env: + - name: REGISTRY_ENDPOINT + value: secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow + - name: NAMESPACE + value: alice + - name: TZ + value: Asia/Shanghai + image: secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia:latest + imagePullPolicy: Always + name: alice + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /home/kuscia/etc/conf/kuscia.yaml + name: kuscia-config + subPath: kuscia.yaml + workingDir: /home/kuscia + imagePullSecrets: + - name: node-image-secret + automountServiceAccountToken: true + volumes: + - configMap: + defaultMode: 420 + name: kuscia-autonomy-alice-cm + name: kuscia-config diff --git a/hack/k8s/autonomy/rbac.yaml b/hack/k8s/autonomy/rbac.yaml new file mode 100644 index 00000000..4b083ae4 --- /dev/null +++ b/hack/k8s/autonomy/rbac.yaml @@ -0,0 +1,34 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: runk-role + namespace: autonomy-alice +rules: + - apiGroups: + - "" + resources: + - pods + - configmaps + - secrets + verbs: + - '*' + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: runk-rolebinding + namespace: autonomy-alice +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: runk-role +subjects: + - kind: ServiceAccount + name: default + namespace: autonomy-alice diff --git a/hack/k8s/autonomy/service.yaml b/hack/k8s/autonomy/service.yaml new file mode 100644 index 00000000..18a7f01c --- /dev/null +++ b/hack/k8s/autonomy/service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: kuscia-autonomy-alice + namespace: autonomy-alice +spec: + selector: + app: kuscia-autonomy-alice + type: ClusterIP + ports: + - name: envoy + port: 1080 + protocol: TCP + targetPort: 1080 + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: kusciaapi + port: 8082 + protocol: TCP + targetPort: 8082 \ No newline at end of file diff --git a/hack/k8s/lite/configmap.yaml b/hack/k8s/lite/configmap.yaml new file mode 100644 index 00000000..31e8a5da --- /dev/null +++ b/hack/k8s/lite/configmap.yaml @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kuscia-lite-alice-cm + namespace: lite-alice +data: + kuscia.yaml: |- + # 启动模式 + mode: Lite + # 节点ID + # 示例: domainID: alice + domainID: {{.DOMAIN_ID}} + # 节点私钥配置, 用于节点间的通信认证, 节点应用的证书签发 + # 注意: 目前节点私钥仅支持 pkcs#1 格式的: "BEGIN RSA PRIVATE KEY/END RSA PRIVATE KEY" + # 执行命令 "docker run -it --rm secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia scripts/deploy/generate_rsa_key.sh" 生成私钥 + domainKeyData: {{.DOMAIN_KEY_DATA}} + + # 日志级别 INFO、DEBUG、WARN + logLevel: INFO + + + # master + # 节点连接 master 的部署 token, 用于节点向 master 注册证书, 只在节点第一次向 master 注册证书时有效 + liteDeployToken: {{.DEPLOY_TOKEN}} + # 节点连接 master 的地址 + # 示例: http://kuscia-master.kuscia-master.svc.cluster.local:1080 + masterEndpoint: {{.MASTER_ENDPOINT}} + + # runc or runk + runtime: runk + + # 当 runtime 为 runk 时配置 + runk: + # 任务调度到指定的机构 k8s namespace上 + namespace: lite-alice + # 机构 k8s 集群的 pod dns 配置, 用于解析节点的应用域名, dns 的地址为 pod service 地址, 此处以 "1.1.1.1" 为例 + dnsServers: + # - kuscia-dns-lb-server + - 
1.1.1.1 + # k8s 集群的 kubeconfig, 不填默认 serviceaccount; 当前请不填,默认使用 serviceaccount + kubeconfigFile: + + # 节点可用于调度应用的容量, runc 不填会自动获取当前容器的系统资源, runk 模式下需要手动配置 + capacity: + cpu: 4 + memory: 4Gi + pods: 500 + storage: 100Gi + + # KusciaAPI 以及节点对外网关使用的通信协议, NOTLS/TLS/MTLS + protocol: NOTLS + + # agent 镜像配置, 使用私有仓库存储镜像时配置(默认无需配置) + image: + pullPolicy: #使用镜像仓库|使用本地 + defaultRegistry: "" + registries: + - name: "" + endpoint: "" + username: "" + password: "" \ No newline at end of file diff --git a/hack/k8s/lite/deployment.yaml b/hack/k8s/lite/deployment.yaml new file mode 100644 index 00000000..317ab824 --- /dev/null +++ b/hack/k8s/lite/deployment.yaml @@ -0,0 +1,44 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kuscia-lite-alice + namespace: lite-alice +spec: + replicas: 1 + selector: + matchLabels: + app: kuscia-lite-alice + template: + metadata: + labels: + app: kuscia-lite-alice + spec: + containers: + - command: + - kuscia + - start + - -c + - etc/conf/kuscia.yaml + env: + - name: REGISTRY_ENDPOINT + value: secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow + - name: NAMESPACE + value: alice + - name: TZ + value: Asia/Shanghai + image: secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia:latest + imagePullPolicy: Always + name: alice + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /home/kuscia/etc/conf/kuscia.yaml + name: kuscia-config + subPath: kuscia.yaml + workingDir: /home/kuscia + automountServiceAccountToken: true + volumes: + - configMap: + defaultMode: 420 + name: kuscia-lite-alice-cm + name: kuscia-config \ No newline at end of file diff --git a/hack/k8s/lite/rbac.yaml b/hack/k8s/lite/rbac.yaml new file mode 100644 index 00000000..a49a2ddd --- /dev/null +++ b/hack/k8s/lite/rbac.yaml @@ -0,0 +1,34 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: runk-role + namespace: lite-alice +rules: + - apiGroups: + - "" + resources: + - pods + - configmaps + - secrets + verbs: + - '*' + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: runk-rolebinding + namespace: lite-alice +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: runk-role +subjects: + - kind: ServiceAccount + name: default + namespace: lite-alice \ No newline at end of file diff --git a/hack/k8s/lite/service.yaml b/hack/k8s/lite/service.yaml new file mode 100644 index 00000000..3fb4b36e --- /dev/null +++ b/hack/k8s/lite/service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: kuscia-lite-alice + namespace: lite-alice +spec: + selector: + app: kuscia-lite-alice + type: ClusterIP + ports: + - name: envoy + port: 1080 + protocol: TCP + targetPort: 1080 + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: kusciaapi + port: 8082 + protocol: TCP + targetPort: 8082 \ No newline at end of file diff --git a/hack/k8s/master/configmap.yaml b/hack/k8s/master/configmap.yaml new file mode 100644 index 00000000..3407a610 --- /dev/null +++ b/hack/k8s/master/configmap.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +data: + kuscia.yaml: |- + # 部署模式 + mode: Master + # 节点ID + # 示例: domainID: kuscia-system + domainID: {{.DOMAIN_ID}} + # 节点私钥配置, 用于节点间的通信认证, 节点应用的证书签发 + # 注意: 目前节点私钥仅支持 pkcs#1 格式的: "BEGIN RSA PRIVATE KEY/END RSA PRIVATE KEY" + # 执行命令 "docker run -it --rm secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia scripts/deploy/generate_rsa_key.sh" 
生成私钥 + domainKeyData: {{.DOMAIN_KEY_DATA}} + + # 日志级别 INFO、DEBUG、WARN + logLevel: INFO + + ####### master节点配置 ######### + # 数据库连接串,不填默认使用 + # 注意: database 名称暂不支持 "-" 特殊字符 + datastoreEndpoint: "mysql://user:password@tcp(host:3306)/database?charset=utf8mb4&parseTime=True&loc=Local" + # KusciaAPI 以及节点对外网关使用的通信协议, NOTLS/TLS/MTLS + protocol: NOTLS +kind: ConfigMap +metadata: + name: kuscia-master-cm + namespace: kuscia-master \ No newline at end of file diff --git a/hack/k8s/master/deployment.yaml b/hack/k8s/master/deployment.yaml new file mode 100644 index 00000000..b98aefd8 --- /dev/null +++ b/hack/k8s/master/deployment.yaml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kuscia-master + namespace: kuscia-master +spec: + replicas: 1 + selector: + matchLabels: + app: kuscia-master + template: + metadata: + labels: + app: kuscia-master + spec: + containers: + - command: + - kuscia + - start + - -c + - etc/conf/kuscia.yaml + ports: + - containerPort: 6443 + env: + - name: REGISTRY_ENDPOINT + value: secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow + - name: NAMESPACE + value: kuscia-system + - name: TZ + value: Asia/Shanghai + image: secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow/kuscia:latest + imagePullPolicy: Always + name: master + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /home/kuscia/etc/conf/kuscia.yaml + name: kuscia-master-conf + subPath: kuscia.yaml + workingDir: /home/kuscia + automountServiceAccountToken: true + volumes: + - configMap: + defaultMode: 420 + name: kuscia-master-cm + name: kuscia-master-conf \ No newline at end of file diff --git a/hack/k8s/master/service.yaml b/hack/k8s/master/service.yaml new file mode 100644 index 00000000..1b4682a9 --- /dev/null +++ b/hack/k8s/master/service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: kuscia-master + namespace: kuscia-master +spec: + selector: + app: kuscia-master + type: ClusterIP + ports: + - name: envoy + port: 1080 + protocol: TCP + targetPort: 1080 + - name: kusciaapi + port: 8082 + protocol: TCP + targetPort: 8082 \ No newline at end of file diff --git a/pkg/common/constants.go b/pkg/common/constants.go index 0b842f2a..f5342456 100644 --- a/pkg/common/constants.go +++ b/pkg/common/constants.go @@ -18,6 +18,8 @@ package common const ( // LabelPortScope represents port usage scope. Its values may be Local, Domain, Cluster. Refer to PortScope for more details. LabelPortScope = "kuscia.secretflow/port-scope" + // LabelPortName represents port name which defined in AppImage container port. 
+ LabelPortName = "kuscia.secretflow/port-name" LabelController = "kuscia.secretflow/controller" LabelGatewayProxy = "kuscia.secretflow/gateway-proxy" @@ -43,10 +45,30 @@ const ( LabelDomainDataGrantDomain = "kuscia.secretflow/domaindatagrant-domain" LabelSelfClusterAsInitiator = "kuscia.secretflow/self-cluster-as-initiator" - LabelInterConnProtocolType = "kuscia.secretflow/interconn-protocol-type" - LabelJobID = "kuscia.secretflow/job-id" - LabelTaskID = "kuscia.secretflow/task-id" - LabelTaskAlias = "kuscia.secretflow/task-alias" + // LabelInterConnProtocolType is a label to specify the interconn protocol type of job + // For KusciaBetaJob, it's only used for partner job + LabelInterConnProtocolType = "kuscia.secretflow/interconn-protocol-type" + LabelJobID = "kuscia.secretflow/job-id" + LabelTaskID = "kuscia.secretflow/task-id" + LabelTaskAlias = "kuscia.secretflow/task-alias" + + // LabelJobStage is a label to specify the current stage of job. + LabelJobStage = "kuscia.secretflow/job-stage" + // LabelJobStageTrigger is a label to specify who trigger the current stage of job. + LabelJobStageTrigger = "kuscia.secretflow/job-stage-trigger" + + // LabelInterConnKusciaParty is a label of a job which has parties interconnected with kuscia protocol, + // the value is a series of domain id join with '_', such as alice_bob_carol . + LabelInterConnKusciaParty = "kuscia.secretflow/interconn-kuscia-parties" + + // LabelInterConnBFIAParty is a label of a job which has parties interconnected with bfia protocol, + // the value is a series of domain id join with '_', such as alice_bob_carol . + LabelInterConnBFIAParty = "kuscia.secretflow/interconn-bfia-parties" + + // LabelTargetDomain is a label represent the target domain of a partner cluster, + // which labeled on the mirror custom resources in mocked master domain of partner cluster, + // the custom resources include DomainData, DomainDataGrant, etc. 
+ LabelTargetDomain = "kuscia.secretflow/target-domain" LabelKusciaDeploymentAppType = "kuscia.secretflow/app-type" LabelKusciaDeploymentUID = "kuscia.secretflow/kd-uid" @@ -165,3 +187,11 @@ const ( ) const DomainCsrExtensionID = "1.2.3.4" + +const ( + CertPrefix = "var/certs/" + LogPrefix = "var/logs/" + StdoutPrefix = "var/stdout/" + TmpPrefix = "var/tmp/" + ConfPrefix = "etc/conf/" +) diff --git a/pkg/confmanager/commands/root.go b/pkg/confmanager/commands/root.go index e8364ea7..848d2c94 100644 --- a/pkg/confmanager/commands/root.go +++ b/pkg/confmanager/commands/root.go @@ -26,7 +26,6 @@ import ( // register driver _ "github.com/secretflow/kuscia/pkg/secretbackend/mem" - _ "github.com/secretflow/kuscia/pkg/secretbackend/rfile" ) func Run(ctx context.Context, conf *config.ConfManagerConfig) error { diff --git a/pkg/controllers/clusterdomainroute/controller.go b/pkg/controllers/clusterdomainroute/controller.go index d41dd32f..baba0c8f 100644 --- a/pkg/controllers/clusterdomainroute/controller.go +++ b/pkg/controllers/clusterdomainroute/controller.go @@ -323,14 +323,14 @@ func (c *controller) syncHandler(ctx context.Context, key string) error { // Create domainroute in source namespace srcdr, err := c.checkDomainRoute(ctx, cdr, sourceRole, cdr.Spec.Source, drName) if err != nil { - nlog.Error(err.Error()) + nlog.Warnf(err.Error()) return err } // Create domainroute in destination namespace destdr, err := c.checkDomainRoute(ctx, cdr, destRole, cdr.Spec.Destination, drName) if err != nil { - nlog.Error(err.Error()) + nlog.Warnf(err.Error()) return err } @@ -496,7 +496,7 @@ func (c *controller) syncStatusFromDomainroute(cdr *kusciaapisv1alpha1.ClusterDo needUpdate = true if len(cdr.Status.TokenStatus.SourceTokens) == 0 { - if !srcdr.Status.IsDestinationAuthrized { + if !srcdr.Status.IsDestinationAuthorized { setCondition(&cdr.Status, newCondition(kusciaapisv1alpha1.ClusterDomainRouteReady, corev1.ConditionFalse, "DestinationIsNotAuthrized", "TokenNotGenerate")) } else { setCondition(&cdr.Status, newCondition(kusciaapisv1alpha1.ClusterDomainRouteReady, corev1.ConditionFalse, "TokenNotGenerate", "TokenNotGenerate")) @@ -603,7 +603,7 @@ func (c *controller) updateLabel(ctx context.Context, cdr *kusciaapisv1alpha1.Cl } if cdr, err = c.kusciaClient.KusciaV1alpha1().ClusterDomainRoutes().Update(ctx, cdrCopy, metav1.UpdateOptions{}); err != nil { - nlog.Errorf("Update cdr, src(%s) dst(%s) failed with (%s)", cdrCopy.Spec.Source, cdrCopy.Spec.Destination, err.Error()) + nlog.Warnf("Update cdr, src(%s) dst(%s) failed with (%s)", cdrCopy.Spec.Source, cdrCopy.Spec.Destination, err.Error()) return cdr, err } diff --git a/pkg/controllers/clusterdomainroute/controller_test.go b/pkg/controllers/clusterdomainroute/controller_test.go index 559e3209..b8b7062c 100644 --- a/pkg/controllers/clusterdomainroute/controller_test.go +++ b/pkg/controllers/clusterdomainroute/controller_test.go @@ -315,7 +315,7 @@ func Test_controller_syncDomainRouteStatus(t *testing.T) { dstDr := &kusciaapisv1alpha1.DomainRoute{ Status: kusciaapisv1alpha1.DomainRouteStatus{ - IsDestinationAuthrized: false, + IsDestinationAuthorized: false, IsDestinationUnreachable: false, TokenStatus: kusciaapisv1alpha1.DomainRouteTokenStatus{ RevisionToken: mockToken, diff --git a/pkg/controllers/domain/authorization_resource.go b/pkg/controllers/domain/authorization_resource.go index 4164dd50..209861e2 100644 --- a/pkg/controllers/domain/authorization_resource.go +++ b/pkg/controllers/domain/authorization_resource.go @@ -161,8 +161,9 @@ func (c 
*Controller) createOrUpdateAuth(domain *kusciaapisv1alpha1.Domain) error newDomain.Labels = make(map[string]string, 0) } newDomain.Labels[constants.LabelDomainAuth] = authCompleted + if _, err := c.kusciaClient.KusciaV1alpha1().Domains().Update(c.ctx, newDomain, metav1.UpdateOptions{}); err != nil { - nlog.Errorf("Update domain [%s] auth label error: %s", domainID, err.Error()) + nlog.Warnf("Update domain [%s] auth label error: %s", domainID, err.Error()) return err } return nil diff --git a/pkg/controllers/domain/controller.go b/pkg/controllers/domain/controller.go index 22fafc61..79a03a26 100644 --- a/pkg/controllers/domain/controller.go +++ b/pkg/controllers/domain/controller.go @@ -338,17 +338,15 @@ func (c *Controller) syncHandler(ctx context.Context, key string) (err error) { // create is used to create resource under domain. func (c *Controller) create(domain *kusciaapisv1alpha1.Domain) error { if err := c.createNamespace(domain); err != nil { - nlog.Errorf("Create domain %v namespace failed: %v", domain.Name, err.Error()) + nlog.Warnf("Create domain %v namespace failed: %v", domain.Name, err.Error()) return err } - if err := c.createResourceQuota(domain); err != nil { - nlog.Errorf("Create domain %v resource quota failed: %v", domain.Name, err.Error()) + nlog.Warnf("Create domain %v resource quota failed: %v", domain.Name, err.Error()) return err } - if err := c.createOrUpdateAuth(domain); err != nil { - nlog.Errorf("Create domain %v auth failed: %v", domain.Name, err.Error()) + nlog.Warnf("Create domain %v auth failed: %v", domain.Name, err.Error()) return err } @@ -358,22 +356,22 @@ func (c *Controller) create(domain *kusciaapisv1alpha1.Domain) error { // update is used to update resource under domain. func (c *Controller) update(domain *kusciaapisv1alpha1.Domain) error { if err := c.updateNamespace(domain); err != nil { - nlog.Errorf("Update domain %v namespace failed: %v", domain.Name, err.Error()) + nlog.Warnf("Update domain %v namespace failed: %v", domain.Name, err.Error()) return err } if err := c.updateResourceQuota(domain); err != nil { - nlog.Errorf("Update domain %v resource quota failed: %v", domain.Name, err.Error()) + nlog.Warnf("Update domain %v resource quota failed: %v", domain.Name, err.Error()) return err } if err := c.createOrUpdateAuth(domain); err != nil { - nlog.Errorf("update domain %v auth failed: %v", domain.Name, err.Error()) + nlog.Warnf("update domain %v auth failed: %v", domain.Name, err.Error()) return err } if err := c.syncDomainStatus(domain); err != nil { - nlog.Errorf("sync domain %v status failed: %v", domain.Name, err.Error()) + nlog.Warnf("sync domain %v status failed: %v", domain.Name, err.Error()) return err } diff --git a/pkg/controllers/domain/domain.go b/pkg/controllers/domain/domain.go index ad66c475..215012a5 100644 --- a/pkg/controllers/domain/domain.go +++ b/pkg/controllers/domain/domain.go @@ -53,7 +53,7 @@ func (c *Controller) syncDomainStatus(dm *kusciaapisv1alpha1.Domain) error { newStatus.NodeStatuses = c.newDomainNodeStatus(deepCopy) newStatus.DeployTokenStatuses = c.newDomainTokenStatus(deepCopy) if !c.isDomainStatusEqual(oldStatus, newStatus) { - nlog.Infof("Updating domain %v status", deepCopy.Name) + nlog.Infof("Update domain %v status", deepCopy.Name) deepCopy.Status = newStatus return c.updateDomainStatus(deepCopy) } diff --git a/pkg/controllers/domainroute/check.go b/pkg/controllers/domainroute/check.go index 2b7fe591..81c423ff 100644 --- a/pkg/controllers/domainroute/check.go +++ b/pkg/controllers/domainroute/check.go @@ 
-42,7 +42,7 @@ func compareTokens(tokens1, tokens2 []kusciaapisv1alpha1.DomainRouteToken) bool func (c *controller) checkEffectiveInstances(dr *kusciaapisv1alpha1.DomainRoute) bool { if len(dr.Status.TokenStatus.Tokens) == 0 { - nlog.Errorf("Domainroute %s/%s checkEffectiveInstances failed: tokens is nil", dr.Namespace, dr.Name) + nlog.Warnf("Domainroute %s/%s checkEffectiveInstances failed: tokens is nil", dr.Namespace, dr.Name) return false } diff --git a/pkg/controllers/kusciadeployment/reconcile.go b/pkg/controllers/kusciadeployment/reconcile.go index 31818dd6..9a8dc55f 100644 --- a/pkg/controllers/kusciadeployment/reconcile.go +++ b/pkg/controllers/kusciadeployment/reconcile.go @@ -387,6 +387,8 @@ func (c *Controller) generateDeployment(partyKitInfo *PartyKitInfo) (*appsv1.Dep common.LabelKusciaDeploymentUID: string(partyKitInfo.kd.UID), common.LabelKusciaDeploymentName: partyKitInfo.kd.Name, common.LabelKubernetesDeploymentName: partyKitInfo.dkInfo.deploymentName, + common.LabelCommunicationRoleServer: "true", + common.LabelCommunicationRoleClient: "true", } ns, err := c.namespaceLister.Get(partyKitInfo.domainID) diff --git a/pkg/controllers/kusciatask/handler/pending_handler.go b/pkg/controllers/kusciatask/handler/pending_handler.go index b953b1af..aceb1ba3 100644 --- a/pkg/controllers/kusciatask/handler/pending_handler.go +++ b/pkg/controllers/kusciatask/handler/pending_handler.go @@ -486,13 +486,16 @@ func (h *PendingHandler) createResourceForParty(partyKit *PartyKitInfo) (map[str return nil, nil, fmt.Errorf("failed to generate service %q, %v", serviceName, err) } - if err := h.submitService(service, pod); err != nil { + if err = h.submitService(service, pod); err != nil { return nil, nil, fmt.Errorf("failed to submit service %q, %v", serviceName, err) } serviceStatuses[service.Namespace+"/"+service.Name] = &kusciaapisv1alpha1.ServiceStatus{ - Namespace: service.GetNamespace(), + Namespace: service.Namespace, ServiceName: service.Name, + PortName: portName, + PortNumber: ctrPort.Port, + Scope: ctrPort.Scope, } } } @@ -842,6 +845,7 @@ func generateServices(partyKit *PartyKitInfo, pod *v1.Pod, serviceName string, p } svc.Labels = map[string]string{ + common.LabelPortName: port.Name, common.LabelPortScope: string(port.Scope), common.LabelInitiator: partyKit.kusciaTask.Spec.Initiator, } diff --git a/pkg/coredns/setup.go b/pkg/coredns/setup.go index afd9f70b..26f2c496 100644 --- a/pkg/coredns/setup.go +++ b/pkg/coredns/setup.go @@ -35,7 +35,7 @@ const ( defaultExpiration = 30 * time.Minute ) -var localService = []string{"datamesh", "confmanager", "dataproxy"} +var localService = []string{"datamesh", "confmanager", "dataproxy", "kusciaapi"} func KusciaParse(c *caddy.Controller, namespace, envoyIP string) (*KusciaCoreDNS, error) { etc := KusciaCoreDNS{ diff --git a/pkg/crd/apis/kuscia/v1alpha1/domain_types.go b/pkg/crd/apis/kuscia/v1alpha1/domain_types.go index b097ddd8..b79162be 100644 --- a/pkg/crd/apis/kuscia/v1alpha1/domain_types.go +++ b/pkg/crd/apis/kuscia/v1alpha1/domain_types.go @@ -60,6 +60,13 @@ type DomainSpec struct { // +optional Role DomainRole `json:"role,omitempty"` + // MasterDomain is used to represent the master domain id of current domain. 
+ // For a omit domain, MasterDomain is exactly local cluster's master + // For a partner domain, the default MasterDomain is the domain itself + // Only for a partner domain which is not an autonomy domain, you need to specify its master domain explicitly + // +optional + MasterDomain string `json:"master,omitempty"` + // Interconnection Protocols // If multiple protocols are specified, select the protocols in the order of configuration. // +optional diff --git a/pkg/crd/apis/kuscia/v1alpha1/domainroute_types.go b/pkg/crd/apis/kuscia/v1alpha1/domainroute_types.go index 079dd155..fcac9f6e 100644 --- a/pkg/crd/apis/kuscia/v1alpha1/domainroute_types.go +++ b/pkg/crd/apis/kuscia/v1alpha1/domainroute_types.go @@ -326,8 +326,8 @@ type DomainRouteMTLSConfig struct { // DomainRouteStatus represents information about the status of DomainRoute. type DomainRouteStatus struct { - IsDestinationAuthrized bool `json:"isDestinationAuthrized"` - IsDestinationUnreachable bool `json:"IsDestinationUnreachable"` + IsDestinationAuthorized bool `json:"isDestinationAuthorized"` + IsDestinationUnreachable bool `json:"isDestinationUnreachable"` // +optional TokenStatus DomainRouteTokenStatus `json:"tokenStatus,omitempty"` } diff --git a/pkg/crd/apis/kuscia/v1alpha1/kusciadeployment_types.go b/pkg/crd/apis/kuscia/v1alpha1/kusciadeployment_types.go index f4ddbfd9..a130480d 100644 --- a/pkg/crd/apis/kuscia/v1alpha1/kusciadeployment_types.go +++ b/pkg/crd/apis/kuscia/v1alpha1/kusciadeployment_types.go @@ -42,6 +42,25 @@ type KusciaDeployment struct { Status KusciaDeploymentStatus `json:"status,omitempty"` } +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:name="TotalParties",type=integer,JSONPath=`.status.totalParties` +// +kubebuilder:printcolumn:name="AvailableParties",type=integer,JSONPath=`.status.availableParties` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=kbd + +// KusciaBetaDeployment is the Schema for the kuscia deployment API. +type KusciaBetaDeployment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec KusciaDeploymentSpec `json:"spec"` + // +optional + Status KusciaDeploymentStatus `json:"status,omitempty"` +} + // +kubebuilder:object:root=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -52,6 +71,16 @@ type KusciaDeploymentList struct { Items []KusciaDeployment `json:"items"` } +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KusciaBetaDeploymentList contains a list of kuscia deployments. +type KusciaBetaDeploymentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KusciaBetaDeployment `json:"items"` +} + // KusciaDeploymentSpec defines the information of kuscia deployment spec. type KusciaDeploymentSpec struct { Initiator string `json:"initiator"` @@ -148,3 +177,38 @@ const ( // KusciaDeploymentPhaseFailed means failed to parse and create deployment. 
KusciaDeploymentPhaseFailed KusciaDeploymentPhase = "Failed" ) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:name="TotalParties",type=integer,JSONPath=`.status.totalParties` +// +kubebuilder:printcolumn:name="AvailableParties",type=integer,JSONPath=`.status.availableParties` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=kds + +// KusciaDeploymentSummary is used to sync deployment status between clusters +type KusciaDeploymentSummary struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec KusciaDeploymentSummarySpec `json:"spec"` + // +optional + Status KusciaDeploymentStatus `json:"status,omitempty"` +} + +// KusciaDeploymentSummarySpec defines the information of kuscia deployment spec. +type KusciaDeploymentSummarySpec struct { + KusciaDeploymentID string `json:"KusciaDeploymentID"` +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KusciaDeploymentSummaryList contains a list of kuscia deployments. +type KusciaDeploymentSummaryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KusciaDeploymentSummary `json:"items"` +} diff --git a/pkg/crd/apis/kuscia/v1alpha1/kusciajob_types.go b/pkg/crd/apis/kuscia/v1alpha1/kusciajob_types.go index d5f45149..179f34e8 100644 --- a/pkg/crd/apis/kuscia/v1alpha1/kusciajob_types.go +++ b/pkg/crd/apis/kuscia/v1alpha1/kusciajob_types.go @@ -42,6 +42,25 @@ type KusciaJob struct { Status KusciaJobStatus `json:"status,omitempty"` } +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:name="StartTime",type=date,JSONPath=`.status.startTime` +// +kubebuilder:printcolumn:name="CompletionTime",type=date,JSONPath=`.status.completionTime` +// +kubebuilder:printcolumn:name="LastReconcileTime",type=date,JSONPath=`.status.lastReconcileTime` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=kbj + +// KusciaBetaJob is the Schema for the kuscia beta job API. +type KusciaBetaJob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec KusciaBetaJobSpec `json:"spec"` + // +optional + Status KusciaJobStatus `json:"status,omitempty"` +} + // +kubebuilder:object:root=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -52,6 +71,16 @@ type KusciaJobList struct { Items []KusciaJob `json:"items"` } +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KusciaBetaJobList contains a list of kuscia jobs. +type KusciaBetaJobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KusciaBetaJob `json:"items"` +} + type JobStage string const ( @@ -66,6 +95,38 @@ type KusciaJobSpec struct { // +optional // +kubebuilder:default=Create Stage JobStage `json:"stage,omitempty"` + + // FlowID defines the id of flow + FlowID string `json:"flowID,omitempty"` + // Initiator who schedule this KusciaJob. + Initiator string `json:"initiator"` + // ScheduleMode defines how this job will be scheduled. 
+ // In Strict, if any non-tolerable subtasks failed, Scheduling for this task stops immediately, and it immediately enters the final Failed state. + // In BestEffort, if any non-tolerable subtasks failed, Scheduling for this job will continue. + // But the successor subtask of the failed subtask stops scheduling, and the current state will be running. + // When all subtasks succeed or fail, the job will enter the Failed state. + // +optional + // +kubebuilder:validation:Enum=Strict;BestEffort + // +kubebuilder:default=Strict + ScheduleMode KusciaJobScheduleMode `json:"scheduleMode,omitempty"` + // MaxParallelism max parallelism of tasks, default 1. + // At a certain moment, there may be multiple subtasks that can be scheduled. + // this field defines the maximum number of tasks in the Running state. + // +optional + // +kubebuilder:default=1 + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=128 + MaxParallelism *int `json:"maxParallelism,omitempty"` + // Tasks defines the subtasks participating in scheduling and their dependencies, + // and the subtasks and dependencies should constitute a directed acyclic graph. + // During runtime, each subtask will be created as a KusciaTask. + // +kubebuilder:validation:MaxItems=128 + // +kubebuilder:validation:MinItems=1 + Tasks []KusciaTaskTemplate `json:"tasks"` +} + +// KusciaBetaJobSpec defines the information of kuscia beta job spec. +type KusciaBetaJobSpec struct { // FlowID defines the id of flow FlowID string `json:"flowID,omitempty"` // Initiator who schedule this KusciaJob. @@ -143,6 +204,15 @@ type KusciaJobStatus struct { // +optional Phase KusciaJobPhase `json:"phase,omitempty"` + // job approve status of each party, if job controller is configured with "AutoApproved", + // the party's approved status will be initiated with "JobAccepted" + // +optional + ApproveStatus map[string]JobApprovePhase `json:"approveStatus,omitempty"` + + // job stage status of each party, + // +optional + StageStatus map[string]JobStagePhase `json:"stageStatus,omitempty"` + // The latest available observations of an object's current state. // +optional Conditions []KusciaJobCondition `json:"conditions,omitempty"` @@ -155,6 +225,10 @@ type KusciaJobStatus struct { // +optional Message string `json:"message,omitempty"` + // PartyTaskCreateStatus describes the created status of party task. + // +optional + PartyTaskCreateStatus map[string][]PartyTaskCreateStatus `json:"partyTaskCreateStatus,omitempty"` + // TaskStatus describes subtasks state. The key is taskId. // Uncreated subtasks will not appear here. // +optional @@ -179,6 +253,24 @@ type KusciaJobStatus struct { LastReconcileTime *metav1.Time `json:"lastReconcileTime,omitempty"` } +// PartyTaskCreateStatus defines party task create status. +type PartyTaskCreateStatus struct { + DomainID string `json:"domainID"` + // +optional + Role string `json:"role,omitempty"` + // +optional + Phase KusciaTaskCreatePhase `json:"phase,omitempty"` + // +optional + Message string `json:"message,omitempty"` +} + +type KusciaTaskCreatePhase string + +const ( + KusciaTaskCreateSucceeded KusciaTaskCreatePhase = "Succeeded" + KusciaTaskCreateFailed KusciaTaskCreatePhase = "Failed" +) + // KusciaJobScheduleMode defines how this job will be scheduled. type KusciaJobScheduleMode string @@ -252,3 +344,60 @@ const ( // KusciaJobFailed means least one non-tolerable tasks are failed and kuscia job scheduling is stopped. 
KusciaJobFailed KusciaJobPhase = "Failed" ) + +type JobStagePhase string + +const ( + JobCreateStageSucceeded JobStagePhase = "JobCreateStageSucceeded" + JobCreateStageFailed JobStagePhase = "JobCreateStageFailed" + + JobStopStageSucceeded JobStagePhase = "JobStopStageSucceeded" + JobStopStageFailed JobStagePhase = "JobStopStageFailed" +) + +type JobApprovePhase string + +const ( + JobRejected JobApprovePhase = "JobRejected" + JobAccepted JobApprovePhase = "JobAccepted" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:name="StartTime",type=date,JSONPath=`.status.startTime` +// +kubebuilder:printcolumn:name="CompletionTime",type=date,JSONPath=`.status.completionTime` +// +kubebuilder:printcolumn:name="LastReconcileTime",type=date,JSONPath=`.status.lastReconcileTime` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=kjs + +// KusciaJobSummary is used to sync job status between clusters +type KusciaJobSummary struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec KusciaJobSummarySpec `json:"spec"` + // +optional + Status KusciaJobStatus `json:"status,omitempty"` +} + +type KusciaJobSummarySpec struct { + // Stage defines the current situation of a job. + // +optional + // +kubebuilder:default=Create + Stage JobStage `json:"stage,omitempty"` + + // StageTrigger refers to the party who trigger current stage + // +optional + StageTrigger string `json:"stageTrigger,omitempty"` +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KusciaJobSummaryList contains a list of kuscia tasks. +type KusciaJobSummaryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KusciaJobSummary `json:"items"` +} diff --git a/pkg/crd/apis/kuscia/v1alpha1/kusciatask_types.go b/pkg/crd/apis/kuscia/v1alpha1/kusciatask_types.go index bf6385c6..7c7c677f 100644 --- a/pkg/crd/apis/kuscia/v1alpha1/kusciatask_types.go +++ b/pkg/crd/apis/kuscia/v1alpha1/kusciatask_types.go @@ -42,6 +42,25 @@ type KusciaTask struct { Status KusciaTaskStatus `json:"status,omitempty"` } +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:name="StartTime",type=date,JSONPath=`.status.startTime` +// +kubebuilder:printcolumn:name="CompletionTime",type=date,JSONPath=`.status.completionTime` +// +kubebuilder:printcolumn:name="LastReconcileTime",type=date,JSONPath=`.status.lastReconcileTime` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=kbt + +// KusciaBetaTask is the Schema for the namespace kuscia task API. +type KusciaBetaTask struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec KusciaTaskSpec `json:"spec"` + // +optional + Status KusciaTaskStatus `json:"status,omitempty"` +} + // +kubebuilder:object:root=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -52,6 +71,16 @@ type KusciaTaskList struct { Items []KusciaTask `json:"items"` } +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KusciaBetaTaskList contains a list of kuscia tasks. 
diff --git a/pkg/crd/apis/kuscia/v1alpha1/kusciatask_types.go b/pkg/crd/apis/kuscia/v1alpha1/kusciatask_types.go
index bf6385c6..7c7c677f 100644
--- a/pkg/crd/apis/kuscia/v1alpha1/kusciatask_types.go
+++ b/pkg/crd/apis/kuscia/v1alpha1/kusciatask_types.go
@@ -42,6 +42,25 @@ type KusciaTask struct {
 	Status KusciaTaskStatus `json:"status,omitempty"`
 }
 
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:printcolumn:name="StartTime",type=date,JSONPath=`.status.startTime`
+// +kubebuilder:printcolumn:name="CompletionTime",type=date,JSONPath=`.status.completionTime`
+// +kubebuilder:printcolumn:name="LastReconcileTime",type=date,JSONPath=`.status.lastReconcileTime`
+// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:shortName=kbt
+
+// KusciaBetaTask is the Schema for the namespace-scoped kuscia task API.
+type KusciaBetaTask struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+	Spec KusciaTaskSpec `json:"spec"`
+	// +optional
+	Status KusciaTaskStatus `json:"status,omitempty"`
+}
+
 // +kubebuilder:object:root=true
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
@@ -52,6 +71,16 @@ type KusciaTaskList struct {
 	Items []KusciaTask `json:"items"`
 }
 
+// +kubebuilder:object:root=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KusciaBetaTaskList contains a list of kuscia beta tasks.
+type KusciaBetaTaskList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []KusciaBetaTask `json:"items"`
+}
+
 // KusciaTaskSpec defines the information of kuscia task spec.
 type KusciaTaskSpec struct {
 	Initiator string `json:"initiator"`
@@ -259,6 +288,15 @@ type ServiceStatus struct {
 	Namespace string `json:"namespace"`
 	// Service name.
 	ServiceName string `json:"serviceName"`
+	// Service's port name, which is defined in the AppImage container port.
+	// +optional
+	PortName string `json:"portName,omitempty"`
+	// Service's port number, which is defined in the AppImage container port.
+	// +optional
+	PortNumber int32 `json:"portNumber,omitempty"`
+	// Service's port scope, which is defined in the AppImage container port.
+	// +optional
+	Scope PortScope `json:"scope,omitempty"`
 	// A brief CamelCase message indicating details about why the service is in this state.
 	// e.g. 'Evicted'
 	// +optional
@@ -276,3 +314,47 @@ type ServiceStatus struct {
 	// +optional
 	ReadyTime *metav1.Time `json:"readyTime,omitempty"`
 }
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="TaskID",type=string,JSONPath=`.spec.taskID`
+// +kubebuilder:printcolumn:name="JobID",type=string,JSONPath=`.spec.jobID`
+// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
+// +kubebuilder:resource:shortName=kts
+
+// KusciaTaskSummary is used to sync task status between clusters.
+type KusciaTaskSummary struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec KusciaTaskSummarySpec `json:"spec"`
+
+	// +optional
+	Status KusciaTaskSummaryStatus `json:"status,omitempty"`
+}
+
+// KusciaTaskSummarySpec defines the information of kuscia task summary spec.
+type KusciaTaskSummarySpec struct {
+	Alias string `json:"alias"`
+	JobID string `json:"jobID"`
+}
+
+type KusciaTaskSummaryStatus struct {
+	KusciaTaskStatus `json:",inline"`
+
+	// ResourceStatus refers to each party's resource status.
+	// +optional
+	ResourceStatus map[string]TaskResourceStatus `json:"resourceStatus,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KusciaTaskSummaryList contains a list of kuscia task summaries.
+type KusciaTaskSummaryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KusciaTaskSummary `json:"items"` +} diff --git a/pkg/crd/apis/kuscia/v1alpha1/register.go b/pkg/crd/apis/kuscia/v1alpha1/register.go index f9d16071..edb3429b 100644 --- a/pkg/crd/apis/kuscia/v1alpha1/register.go +++ b/pkg/crd/apis/kuscia/v1alpha1/register.go @@ -76,6 +76,14 @@ func addKnownTypes(scheme *runtime.Scheme) error { &InteropConfigList{}, &KusciaDeployment{}, &KusciaDeploymentList{}, + &KusciaBetaJob{}, + &KusciaBetaJobList{}, + &KusciaJobSummary{}, + &KusciaJobSummaryList{}, + &KusciaBetaTask{}, + &KusciaBetaTaskList{}, + &KusciaTaskSummary{}, + &KusciaTaskSummaryList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/pkg/crd/apis/kuscia/v1alpha1/zz_generated.deepcopy.go b/pkg/crd/apis/kuscia/v1alpha1/zz_generated.deepcopy.go index 53e63473..d33b3037 100644 --- a/pkg/crd/apis/kuscia/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/crd/apis/kuscia/v1alpha1/zz_generated.deepcopy.go @@ -1620,6 +1620,217 @@ func (in *InteropConfigSpec) DeepCopy() *InteropConfigSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaBetaDeployment) DeepCopyInto(out *KusciaBetaDeployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaBetaDeployment. +func (in *KusciaBetaDeployment) DeepCopy() *KusciaBetaDeployment { + if in == nil { + return nil + } + out := new(KusciaBetaDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaBetaDeployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaBetaDeploymentList) DeepCopyInto(out *KusciaBetaDeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KusciaBetaDeployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaBetaDeploymentList. +func (in *KusciaBetaDeploymentList) DeepCopy() *KusciaBetaDeploymentList { + if in == nil { + return nil + } + out := new(KusciaBetaDeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaBetaDeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaBetaJob) DeepCopyInto(out *KusciaBetaJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaBetaJob. 
+func (in *KusciaBetaJob) DeepCopy() *KusciaBetaJob { + if in == nil { + return nil + } + out := new(KusciaBetaJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaBetaJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaBetaJobList) DeepCopyInto(out *KusciaBetaJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KusciaBetaJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaBetaJobList. +func (in *KusciaBetaJobList) DeepCopy() *KusciaBetaJobList { + if in == nil { + return nil + } + out := new(KusciaBetaJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaBetaJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaBetaJobSpec) DeepCopyInto(out *KusciaBetaJobSpec) { + *out = *in + if in.MaxParallelism != nil { + in, out := &in.MaxParallelism, &out.MaxParallelism + *out = new(int) + **out = **in + } + if in.Tasks != nil { + in, out := &in.Tasks, &out.Tasks + *out = make([]KusciaTaskTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaBetaJobSpec. +func (in *KusciaBetaJobSpec) DeepCopy() *KusciaBetaJobSpec { + if in == nil { + return nil + } + out := new(KusciaBetaJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaBetaTask) DeepCopyInto(out *KusciaBetaTask) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaBetaTask. +func (in *KusciaBetaTask) DeepCopy() *KusciaBetaTask { + if in == nil { + return nil + } + out := new(KusciaBetaTask) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaBetaTask) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KusciaBetaTaskList) DeepCopyInto(out *KusciaBetaTaskList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KusciaBetaTask, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaBetaTaskList. +func (in *KusciaBetaTaskList) DeepCopy() *KusciaBetaTaskList { + if in == nil { + return nil + } + out := new(KusciaBetaTaskList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaBetaTaskList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KusciaDeployment) DeepCopyInto(out *KusciaDeployment) { *out = *in @@ -1820,6 +2031,83 @@ func (in *KusciaDeploymentStatus) DeepCopy() *KusciaDeploymentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaDeploymentSummary) DeepCopyInto(out *KusciaDeploymentSummary) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaDeploymentSummary. +func (in *KusciaDeploymentSummary) DeepCopy() *KusciaDeploymentSummary { + if in == nil { + return nil + } + out := new(KusciaDeploymentSummary) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaDeploymentSummary) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaDeploymentSummaryList) DeepCopyInto(out *KusciaDeploymentSummaryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KusciaDeploymentSummary, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaDeploymentSummaryList. +func (in *KusciaDeploymentSummaryList) DeepCopy() *KusciaDeploymentSummaryList { + if in == nil { + return nil + } + out := new(KusciaDeploymentSummaryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaDeploymentSummaryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaDeploymentSummarySpec) DeepCopyInto(out *KusciaDeploymentSummarySpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaDeploymentSummarySpec. 
+func (in *KusciaDeploymentSummarySpec) DeepCopy() *KusciaDeploymentSummarySpec { + if in == nil { + return nil + } + out := new(KusciaDeploymentSummarySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KusciaJob) DeepCopyInto(out *KusciaJob) { *out = *in @@ -1932,6 +2220,20 @@ func (in *KusciaJobSpec) DeepCopy() *KusciaJobSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KusciaJobStatus) DeepCopyInto(out *KusciaJobStatus) { *out = *in + if in.ApproveStatus != nil { + in, out := &in.ApproveStatus, &out.ApproveStatus + *out = make(map[string]JobApprovePhase, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.StageStatus != nil { + in, out := &in.StageStatus, &out.StageStatus + *out = make(map[string]JobStagePhase, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]KusciaJobCondition, len(*in)) @@ -1939,6 +2241,21 @@ func (in *KusciaJobStatus) DeepCopyInto(out *KusciaJobStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PartyTaskCreateStatus != nil { + in, out := &in.PartyTaskCreateStatus, &out.PartyTaskCreateStatus + *out = make(map[string][]PartyTaskCreateStatus, len(*in)) + for key, val := range *in { + var outVal []PartyTaskCreateStatus + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]PartyTaskCreateStatus, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } if in.TaskStatus != nil { in, out := &in.TaskStatus, &out.TaskStatus *out = make(map[string]KusciaTaskPhase, len(*in)) @@ -1971,6 +2288,83 @@ func (in *KusciaJobStatus) DeepCopy() *KusciaJobStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaJobSummary) DeepCopyInto(out *KusciaJobSummary) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaJobSummary. +func (in *KusciaJobSummary) DeepCopy() *KusciaJobSummary { + if in == nil { + return nil + } + out := new(KusciaJobSummary) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaJobSummary) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaJobSummaryList) DeepCopyInto(out *KusciaJobSummaryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KusciaJobSummary, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaJobSummaryList. 
+func (in *KusciaJobSummaryList) DeepCopy() *KusciaJobSummaryList { + if in == nil { + return nil + } + out := new(KusciaJobSummaryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaJobSummaryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaJobSummarySpec) DeepCopyInto(out *KusciaJobSummarySpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaJobSummarySpec. +func (in *KusciaJobSummarySpec) DeepCopy() *KusciaJobSummarySpec { + if in == nil { + return nil + } + out := new(KusciaJobSummarySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KusciaTask) DeepCopyInto(out *KusciaTask) { *out = *in @@ -2146,6 +2540,107 @@ func (in *KusciaTaskStatus) DeepCopy() *KusciaTaskStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaTaskSummary) DeepCopyInto(out *KusciaTaskSummary) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaTaskSummary. +func (in *KusciaTaskSummary) DeepCopy() *KusciaTaskSummary { + if in == nil { + return nil + } + out := new(KusciaTaskSummary) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaTaskSummary) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaTaskSummaryList) DeepCopyInto(out *KusciaTaskSummaryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KusciaTaskSummary, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaTaskSummaryList. +func (in *KusciaTaskSummaryList) DeepCopy() *KusciaTaskSummaryList { + if in == nil { + return nil + } + out := new(KusciaTaskSummaryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KusciaTaskSummaryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaTaskSummarySpec) DeepCopyInto(out *KusciaTaskSummarySpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaTaskSummarySpec. 
+func (in *KusciaTaskSummarySpec) DeepCopy() *KusciaTaskSummarySpec { + if in == nil { + return nil + } + out := new(KusciaTaskSummarySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KusciaTaskSummaryStatus) DeepCopyInto(out *KusciaTaskSummaryStatus) { + *out = *in + in.KusciaTaskStatus.DeepCopyInto(&out.KusciaTaskStatus) + if in.ResourceStatus != nil { + in, out := &in.ResourceStatus, &out.ResourceStatus + *out = make(map[string]TaskResourceStatus, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KusciaTaskSummaryStatus. +func (in *KusciaTaskSummaryStatus) DeepCopy() *KusciaTaskSummaryStatus { + if in == nil { + return nil + } + out := new(KusciaTaskSummaryStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KusciaTaskTemplate) DeepCopyInto(out *KusciaTaskTemplate) { *out = *in @@ -2277,6 +2772,22 @@ func (in *PartyInfo) DeepCopy() *PartyInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartyTaskCreateStatus) DeepCopyInto(out *PartyTaskCreateStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartyTaskCreateStatus. +func (in *PartyTaskCreateStatus) DeepCopy() *PartyTaskCreateStatus { + if in == nil { + return nil + } + out := new(PartyTaskCreateStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PartyTaskStatus) DeepCopyInto(out *PartyTaskStatus) { *out = *in diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kuscia_client.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kuscia_client.go index aa00a589..384dac3f 100644 --- a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kuscia_client.go +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kuscia_client.go @@ -66,18 +66,42 @@ func (c *FakeKusciaV1alpha1) InteropConfigs() v1alpha1.InteropConfigInterface { return &FakeInteropConfigs{c} } +func (c *FakeKusciaV1alpha1) KusciaBetaDeployments(namespace string) v1alpha1.KusciaBetaDeploymentInterface { + return &FakeKusciaBetaDeployments{c, namespace} +} + +func (c *FakeKusciaV1alpha1) KusciaBetaJobs(namespace string) v1alpha1.KusciaBetaJobInterface { + return &FakeKusciaBetaJobs{c, namespace} +} + +func (c *FakeKusciaV1alpha1) KusciaBetaTasks(namespace string) v1alpha1.KusciaBetaTaskInterface { + return &FakeKusciaBetaTasks{c, namespace} +} + func (c *FakeKusciaV1alpha1) KusciaDeployments() v1alpha1.KusciaDeploymentInterface { return &FakeKusciaDeployments{c} } +func (c *FakeKusciaV1alpha1) KusciaDeploymentSummaries(namespace string) v1alpha1.KusciaDeploymentSummaryInterface { + return &FakeKusciaDeploymentSummaries{c, namespace} +} + func (c *FakeKusciaV1alpha1) KusciaJobs() v1alpha1.KusciaJobInterface { return &FakeKusciaJobs{c} } +func (c *FakeKusciaV1alpha1) KusciaJobSummaries(namespace string) v1alpha1.KusciaJobSummaryInterface { + return &FakeKusciaJobSummaries{c, namespace} +} + func (c *FakeKusciaV1alpha1) KusciaTasks() v1alpha1.KusciaTaskInterface { return &FakeKusciaTasks{c} } +func (c *FakeKusciaV1alpha1) KusciaTaskSummaries(namespace string) v1alpha1.KusciaTaskSummaryInterface { + return &FakeKusciaTaskSummaries{c, namespace} +} + func (c *FakeKusciaV1alpha1) TaskResources(namespace string) v1alpha1.TaskResourceInterface { return &FakeTaskResources{c, namespace} } diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetadeployment.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetadeployment.go new file mode 100644 index 00000000..574cb6a6 --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetadeployment.go @@ -0,0 +1,140 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKusciaBetaDeployments implements KusciaBetaDeploymentInterface +type FakeKusciaBetaDeployments struct { + Fake *FakeKusciaV1alpha1 + ns string +} + +var kusciabetadeploymentsResource = schema.GroupVersionResource{Group: "kuscia.secretflow", Version: "v1alpha1", Resource: "kusciabetadeployments"} + +var kusciabetadeploymentsKind = schema.GroupVersionKind{Group: "kuscia.secretflow", Version: "v1alpha1", Kind: "KusciaBetaDeployment"} + +// Get takes name of the kusciaBetaDeployment, and returns the corresponding kusciaBetaDeployment object, and an error if there is any. +func (c *FakeKusciaBetaDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaBetaDeployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kusciabetadeploymentsResource, c.ns, name), &v1alpha1.KusciaBetaDeployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaDeployment), err +} + +// List takes label and field selectors, and returns the list of KusciaBetaDeployments that match those selectors. +func (c *FakeKusciaBetaDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaBetaDeploymentList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kusciabetadeploymentsResource, kusciabetadeploymentsKind, c.ns, opts), &v1alpha1.KusciaBetaDeploymentList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.KusciaBetaDeploymentList{ListMeta: obj.(*v1alpha1.KusciaBetaDeploymentList).ListMeta} + for _, item := range obj.(*v1alpha1.KusciaBetaDeploymentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kusciaBetaDeployments. +func (c *FakeKusciaBetaDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kusciabetadeploymentsResource, c.ns, opts)) + +} + +// Create takes the representation of a kusciaBetaDeployment and creates it. Returns the server's representation of the kusciaBetaDeployment, and an error, if there is any. +func (c *FakeKusciaBetaDeployments) Create(ctx context.Context, kusciaBetaDeployment *v1alpha1.KusciaBetaDeployment, opts v1.CreateOptions) (result *v1alpha1.KusciaBetaDeployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(kusciabetadeploymentsResource, c.ns, kusciaBetaDeployment), &v1alpha1.KusciaBetaDeployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaDeployment), err +} + +// Update takes the representation of a kusciaBetaDeployment and updates it. Returns the server's representation of the kusciaBetaDeployment, and an error, if there is any. +func (c *FakeKusciaBetaDeployments) Update(ctx context.Context, kusciaBetaDeployment *v1alpha1.KusciaBetaDeployment, opts v1.UpdateOptions) (result *v1alpha1.KusciaBetaDeployment, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(kusciabetadeploymentsResource, c.ns, kusciaBetaDeployment), &v1alpha1.KusciaBetaDeployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaDeployment), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKusciaBetaDeployments) UpdateStatus(ctx context.Context, kusciaBetaDeployment *v1alpha1.KusciaBetaDeployment, opts v1.UpdateOptions) (*v1alpha1.KusciaBetaDeployment, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kusciabetadeploymentsResource, "status", c.ns, kusciaBetaDeployment), &v1alpha1.KusciaBetaDeployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaDeployment), err +} + +// Delete takes name of the kusciaBetaDeployment and deletes it. Returns an error if one occurs. +func (c *FakeKusciaBetaDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(kusciabetadeploymentsResource, c.ns, name, opts), &v1alpha1.KusciaBetaDeployment{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKusciaBetaDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kusciabetadeploymentsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.KusciaBetaDeploymentList{}) + return err +} + +// Patch applies the patch and returns the patched kusciaBetaDeployment. +func (c *FakeKusciaBetaDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaBetaDeployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(kusciabetadeploymentsResource, c.ns, name, pt, data, subresources...), &v1alpha1.KusciaBetaDeployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaDeployment), err +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetajob.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetajob.go new file mode 100644 index 00000000..5cae4d3d --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetajob.go @@ -0,0 +1,140 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKusciaBetaJobs implements KusciaBetaJobInterface +type FakeKusciaBetaJobs struct { + Fake *FakeKusciaV1alpha1 + ns string +} + +var kusciabetajobsResource = schema.GroupVersionResource{Group: "kuscia.secretflow", Version: "v1alpha1", Resource: "kusciabetajobs"} + +var kusciabetajobsKind = schema.GroupVersionKind{Group: "kuscia.secretflow", Version: "v1alpha1", Kind: "KusciaBetaJob"} + +// Get takes name of the kusciaBetaJob, and returns the corresponding kusciaBetaJob object, and an error if there is any. +func (c *FakeKusciaBetaJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaBetaJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kusciabetajobsResource, c.ns, name), &v1alpha1.KusciaBetaJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaJob), err +} + +// List takes label and field selectors, and returns the list of KusciaBetaJobs that match those selectors. +func (c *FakeKusciaBetaJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaBetaJobList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kusciabetajobsResource, kusciabetajobsKind, c.ns, opts), &v1alpha1.KusciaBetaJobList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.KusciaBetaJobList{ListMeta: obj.(*v1alpha1.KusciaBetaJobList).ListMeta} + for _, item := range obj.(*v1alpha1.KusciaBetaJobList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kusciaBetaJobs. +func (c *FakeKusciaBetaJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kusciabetajobsResource, c.ns, opts)) + +} + +// Create takes the representation of a kusciaBetaJob and creates it. Returns the server's representation of the kusciaBetaJob, and an error, if there is any. +func (c *FakeKusciaBetaJobs) Create(ctx context.Context, kusciaBetaJob *v1alpha1.KusciaBetaJob, opts v1.CreateOptions) (result *v1alpha1.KusciaBetaJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(kusciabetajobsResource, c.ns, kusciaBetaJob), &v1alpha1.KusciaBetaJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaJob), err +} + +// Update takes the representation of a kusciaBetaJob and updates it. Returns the server's representation of the kusciaBetaJob, and an error, if there is any. +func (c *FakeKusciaBetaJobs) Update(ctx context.Context, kusciaBetaJob *v1alpha1.KusciaBetaJob, opts v1.UpdateOptions) (result *v1alpha1.KusciaBetaJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(kusciabetajobsResource, c.ns, kusciaBetaJob), &v1alpha1.KusciaBetaJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaJob), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKusciaBetaJobs) UpdateStatus(ctx context.Context, kusciaBetaJob *v1alpha1.KusciaBetaJob, opts v1.UpdateOptions) (*v1alpha1.KusciaBetaJob, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kusciabetajobsResource, "status", c.ns, kusciaBetaJob), &v1alpha1.KusciaBetaJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaJob), err +} + +// Delete takes name of the kusciaBetaJob and deletes it. Returns an error if one occurs. +func (c *FakeKusciaBetaJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(kusciabetajobsResource, c.ns, name, opts), &v1alpha1.KusciaBetaJob{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKusciaBetaJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kusciabetajobsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.KusciaBetaJobList{}) + return err +} + +// Patch applies the patch and returns the patched kusciaBetaJob. +func (c *FakeKusciaBetaJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaBetaJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(kusciabetajobsResource, c.ns, name, pt, data, subresources...), &v1alpha1.KusciaBetaJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaJob), err +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetatask.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetatask.go new file mode 100644 index 00000000..7154e783 --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciabetatask.go @@ -0,0 +1,140 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKusciaBetaTasks implements KusciaBetaTaskInterface +type FakeKusciaBetaTasks struct { + Fake *FakeKusciaV1alpha1 + ns string +} + +var kusciabetatasksResource = schema.GroupVersionResource{Group: "kuscia.secretflow", Version: "v1alpha1", Resource: "kusciabetatasks"} + +var kusciabetatasksKind = schema.GroupVersionKind{Group: "kuscia.secretflow", Version: "v1alpha1", Kind: "KusciaBetaTask"} + +// Get takes name of the kusciaBetaTask, and returns the corresponding kusciaBetaTask object, and an error if there is any. +func (c *FakeKusciaBetaTasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaBetaTask, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kusciabetatasksResource, c.ns, name), &v1alpha1.KusciaBetaTask{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaTask), err +} + +// List takes label and field selectors, and returns the list of KusciaBetaTasks that match those selectors. +func (c *FakeKusciaBetaTasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaBetaTaskList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kusciabetatasksResource, kusciabetatasksKind, c.ns, opts), &v1alpha1.KusciaBetaTaskList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.KusciaBetaTaskList{ListMeta: obj.(*v1alpha1.KusciaBetaTaskList).ListMeta} + for _, item := range obj.(*v1alpha1.KusciaBetaTaskList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kusciaBetaTasks. +func (c *FakeKusciaBetaTasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kusciabetatasksResource, c.ns, opts)) + +} + +// Create takes the representation of a kusciaBetaTask and creates it. Returns the server's representation of the kusciaBetaTask, and an error, if there is any. +func (c *FakeKusciaBetaTasks) Create(ctx context.Context, kusciaBetaTask *v1alpha1.KusciaBetaTask, opts v1.CreateOptions) (result *v1alpha1.KusciaBetaTask, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(kusciabetatasksResource, c.ns, kusciaBetaTask), &v1alpha1.KusciaBetaTask{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaTask), err +} + +// Update takes the representation of a kusciaBetaTask and updates it. Returns the server's representation of the kusciaBetaTask, and an error, if there is any. +func (c *FakeKusciaBetaTasks) Update(ctx context.Context, kusciaBetaTask *v1alpha1.KusciaBetaTask, opts v1.UpdateOptions) (result *v1alpha1.KusciaBetaTask, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(kusciabetatasksResource, c.ns, kusciaBetaTask), &v1alpha1.KusciaBetaTask{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaTask), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKusciaBetaTasks) UpdateStatus(ctx context.Context, kusciaBetaTask *v1alpha1.KusciaBetaTask, opts v1.UpdateOptions) (*v1alpha1.KusciaBetaTask, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kusciabetatasksResource, "status", c.ns, kusciaBetaTask), &v1alpha1.KusciaBetaTask{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaTask), err +} + +// Delete takes name of the kusciaBetaTask and deletes it. Returns an error if one occurs. +func (c *FakeKusciaBetaTasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(kusciabetatasksResource, c.ns, name, opts), &v1alpha1.KusciaBetaTask{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKusciaBetaTasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kusciabetatasksResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.KusciaBetaTaskList{}) + return err +} + +// Patch applies the patch and returns the patched kusciaBetaTask. +func (c *FakeKusciaBetaTasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaBetaTask, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(kusciabetatasksResource, c.ns, name, pt, data, subresources...), &v1alpha1.KusciaBetaTask{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaBetaTask), err +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciadeploymentsummary.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciadeploymentsummary.go new file mode 100644 index 00000000..44b693ee --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciadeploymentsummary.go @@ -0,0 +1,140 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKusciaDeploymentSummaries implements KusciaDeploymentSummaryInterface +type FakeKusciaDeploymentSummaries struct { + Fake *FakeKusciaV1alpha1 + ns string +} + +var kusciadeploymentsummariesResource = schema.GroupVersionResource{Group: "kuscia.secretflow", Version: "v1alpha1", Resource: "kusciadeploymentsummaries"} + +var kusciadeploymentsummariesKind = schema.GroupVersionKind{Group: "kuscia.secretflow", Version: "v1alpha1", Kind: "KusciaDeploymentSummary"} + +// Get takes name of the kusciaDeploymentSummary, and returns the corresponding kusciaDeploymentSummary object, and an error if there is any. +func (c *FakeKusciaDeploymentSummaries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaDeploymentSummary, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kusciadeploymentsummariesResource, c.ns, name), &v1alpha1.KusciaDeploymentSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaDeploymentSummary), err +} + +// List takes label and field selectors, and returns the list of KusciaDeploymentSummaries that match those selectors. +func (c *FakeKusciaDeploymentSummaries) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaDeploymentSummaryList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kusciadeploymentsummariesResource, kusciadeploymentsummariesKind, c.ns, opts), &v1alpha1.KusciaDeploymentSummaryList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.KusciaDeploymentSummaryList{ListMeta: obj.(*v1alpha1.KusciaDeploymentSummaryList).ListMeta} + for _, item := range obj.(*v1alpha1.KusciaDeploymentSummaryList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kusciaDeploymentSummaries. +func (c *FakeKusciaDeploymentSummaries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kusciadeploymentsummariesResource, c.ns, opts)) + +} + +// Create takes the representation of a kusciaDeploymentSummary and creates it. Returns the server's representation of the kusciaDeploymentSummary, and an error, if there is any. +func (c *FakeKusciaDeploymentSummaries) Create(ctx context.Context, kusciaDeploymentSummary *v1alpha1.KusciaDeploymentSummary, opts v1.CreateOptions) (result *v1alpha1.KusciaDeploymentSummary, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(kusciadeploymentsummariesResource, c.ns, kusciaDeploymentSummary), &v1alpha1.KusciaDeploymentSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaDeploymentSummary), err +} + +// Update takes the representation of a kusciaDeploymentSummary and updates it. Returns the server's representation of the kusciaDeploymentSummary, and an error, if there is any. 
+func (c *FakeKusciaDeploymentSummaries) Update(ctx context.Context, kusciaDeploymentSummary *v1alpha1.KusciaDeploymentSummary, opts v1.UpdateOptions) (result *v1alpha1.KusciaDeploymentSummary, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(kusciadeploymentsummariesResource, c.ns, kusciaDeploymentSummary), &v1alpha1.KusciaDeploymentSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaDeploymentSummary), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKusciaDeploymentSummaries) UpdateStatus(ctx context.Context, kusciaDeploymentSummary *v1alpha1.KusciaDeploymentSummary, opts v1.UpdateOptions) (*v1alpha1.KusciaDeploymentSummary, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kusciadeploymentsummariesResource, "status", c.ns, kusciaDeploymentSummary), &v1alpha1.KusciaDeploymentSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaDeploymentSummary), err +} + +// Delete takes name of the kusciaDeploymentSummary and deletes it. Returns an error if one occurs. +func (c *FakeKusciaDeploymentSummaries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(kusciadeploymentsummariesResource, c.ns, name, opts), &v1alpha1.KusciaDeploymentSummary{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKusciaDeploymentSummaries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kusciadeploymentsummariesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.KusciaDeploymentSummaryList{}) + return err +} + +// Patch applies the patch and returns the patched kusciaDeploymentSummary. +func (c *FakeKusciaDeploymentSummaries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaDeploymentSummary, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(kusciadeploymentsummariesResource, c.ns, name, pt, data, subresources...), &v1alpha1.KusciaDeploymentSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaDeploymentSummary), err +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciajobsummary.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciajobsummary.go new file mode 100644 index 00000000..cbe20939 --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciajobsummary.go @@ -0,0 +1,140 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKusciaJobSummaries implements KusciaJobSummaryInterface +type FakeKusciaJobSummaries struct { + Fake *FakeKusciaV1alpha1 + ns string +} + +var kusciajobsummariesResource = schema.GroupVersionResource{Group: "kuscia.secretflow", Version: "v1alpha1", Resource: "kusciajobsummaries"} + +var kusciajobsummariesKind = schema.GroupVersionKind{Group: "kuscia.secretflow", Version: "v1alpha1", Kind: "KusciaJobSummary"} + +// Get takes name of the kusciaJobSummary, and returns the corresponding kusciaJobSummary object, and an error if there is any. +func (c *FakeKusciaJobSummaries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaJobSummary, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kusciajobsummariesResource, c.ns, name), &v1alpha1.KusciaJobSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaJobSummary), err +} + +// List takes label and field selectors, and returns the list of KusciaJobSummaries that match those selectors. +func (c *FakeKusciaJobSummaries) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaJobSummaryList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kusciajobsummariesResource, kusciajobsummariesKind, c.ns, opts), &v1alpha1.KusciaJobSummaryList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.KusciaJobSummaryList{ListMeta: obj.(*v1alpha1.KusciaJobSummaryList).ListMeta} + for _, item := range obj.(*v1alpha1.KusciaJobSummaryList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kusciaJobSummaries. +func (c *FakeKusciaJobSummaries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kusciajobsummariesResource, c.ns, opts)) + +} + +// Create takes the representation of a kusciaJobSummary and creates it. Returns the server's representation of the kusciaJobSummary, and an error, if there is any. +func (c *FakeKusciaJobSummaries) Create(ctx context.Context, kusciaJobSummary *v1alpha1.KusciaJobSummary, opts v1.CreateOptions) (result *v1alpha1.KusciaJobSummary, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(kusciajobsummariesResource, c.ns, kusciaJobSummary), &v1alpha1.KusciaJobSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaJobSummary), err +} + +// Update takes the representation of a kusciaJobSummary and updates it. Returns the server's representation of the kusciaJobSummary, and an error, if there is any. +func (c *FakeKusciaJobSummaries) Update(ctx context.Context, kusciaJobSummary *v1alpha1.KusciaJobSummary, opts v1.UpdateOptions) (result *v1alpha1.KusciaJobSummary, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(kusciajobsummariesResource, c.ns, kusciaJobSummary), &v1alpha1.KusciaJobSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaJobSummary), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKusciaJobSummaries) UpdateStatus(ctx context.Context, kusciaJobSummary *v1alpha1.KusciaJobSummary, opts v1.UpdateOptions) (*v1alpha1.KusciaJobSummary, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kusciajobsummariesResource, "status", c.ns, kusciaJobSummary), &v1alpha1.KusciaJobSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaJobSummary), err +} + +// Delete takes name of the kusciaJobSummary and deletes it. Returns an error if one occurs. +func (c *FakeKusciaJobSummaries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(kusciajobsummariesResource, c.ns, name, opts), &v1alpha1.KusciaJobSummary{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKusciaJobSummaries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kusciajobsummariesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.KusciaJobSummaryList{}) + return err +} + +// Patch applies the patch and returns the patched kusciaJobSummary. +func (c *FakeKusciaJobSummaries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaJobSummary, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(kusciajobsummariesResource, c.ns, name, pt, data, subresources...), &v1alpha1.KusciaJobSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaJobSummary), err +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciatasksummary.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciatasksummary.go new file mode 100644 index 00000000..761c217d --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/fake/fake_kusciatasksummary.go @@ -0,0 +1,140 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKusciaTaskSummaries implements KusciaTaskSummaryInterface +type FakeKusciaTaskSummaries struct { + Fake *FakeKusciaV1alpha1 + ns string +} + +var kusciatasksummariesResource = schema.GroupVersionResource{Group: "kuscia.secretflow", Version: "v1alpha1", Resource: "kusciatasksummaries"} + +var kusciatasksummariesKind = schema.GroupVersionKind{Group: "kuscia.secretflow", Version: "v1alpha1", Kind: "KusciaTaskSummary"} + +// Get takes name of the kusciaTaskSummary, and returns the corresponding kusciaTaskSummary object, and an error if there is any. +func (c *FakeKusciaTaskSummaries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaTaskSummary, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kusciatasksummariesResource, c.ns, name), &v1alpha1.KusciaTaskSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaTaskSummary), err +} + +// List takes label and field selectors, and returns the list of KusciaTaskSummaries that match those selectors. +func (c *FakeKusciaTaskSummaries) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaTaskSummaryList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kusciatasksummariesResource, kusciatasksummariesKind, c.ns, opts), &v1alpha1.KusciaTaskSummaryList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.KusciaTaskSummaryList{ListMeta: obj.(*v1alpha1.KusciaTaskSummaryList).ListMeta} + for _, item := range obj.(*v1alpha1.KusciaTaskSummaryList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kusciaTaskSummaries. +func (c *FakeKusciaTaskSummaries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kusciatasksummariesResource, c.ns, opts)) + +} + +// Create takes the representation of a kusciaTaskSummary and creates it. Returns the server's representation of the kusciaTaskSummary, and an error, if there is any. +func (c *FakeKusciaTaskSummaries) Create(ctx context.Context, kusciaTaskSummary *v1alpha1.KusciaTaskSummary, opts v1.CreateOptions) (result *v1alpha1.KusciaTaskSummary, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(kusciatasksummariesResource, c.ns, kusciaTaskSummary), &v1alpha1.KusciaTaskSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaTaskSummary), err +} + +// Update takes the representation of a kusciaTaskSummary and updates it. Returns the server's representation of the kusciaTaskSummary, and an error, if there is any. +func (c *FakeKusciaTaskSummaries) Update(ctx context.Context, kusciaTaskSummary *v1alpha1.KusciaTaskSummary, opts v1.UpdateOptions) (result *v1alpha1.KusciaTaskSummary, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(kusciatasksummariesResource, c.ns, kusciaTaskSummary), &v1alpha1.KusciaTaskSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaTaskSummary), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKusciaTaskSummaries) UpdateStatus(ctx context.Context, kusciaTaskSummary *v1alpha1.KusciaTaskSummary, opts v1.UpdateOptions) (*v1alpha1.KusciaTaskSummary, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kusciatasksummariesResource, "status", c.ns, kusciaTaskSummary), &v1alpha1.KusciaTaskSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaTaskSummary), err +} + +// Delete takes name of the kusciaTaskSummary and deletes it. Returns an error if one occurs. +func (c *FakeKusciaTaskSummaries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(kusciatasksummariesResource, c.ns, name, opts), &v1alpha1.KusciaTaskSummary{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKusciaTaskSummaries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kusciatasksummariesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.KusciaTaskSummaryList{}) + return err +} + +// Patch applies the patch and returns the patched kusciaTaskSummary. +func (c *FakeKusciaTaskSummaries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaTaskSummary, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(kusciatasksummariesResource, c.ns, name, pt, data, subresources...), &v1alpha1.KusciaTaskSummary{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KusciaTaskSummary), err +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/generated_expansion.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/generated_expansion.go index 3491e110..689b3d5a 100644 --- a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/generated_expansion.go +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/generated_expansion.go @@ -36,12 +36,24 @@ type GatewayExpansion interface{} type InteropConfigExpansion interface{} +type KusciaBetaDeploymentExpansion interface{} + +type KusciaBetaJobExpansion interface{} + +type KusciaBetaTaskExpansion interface{} + type KusciaDeploymentExpansion interface{} +type KusciaDeploymentSummaryExpansion interface{} + type KusciaJobExpansion interface{} +type KusciaJobSummaryExpansion interface{} + type KusciaTaskExpansion interface{} +type KusciaTaskSummaryExpansion interface{} + type TaskResourceExpansion interface{} type TaskResourceGroupExpansion interface{} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kuscia_client.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kuscia_client.go index 9955ffe5..132eabe4 100644 --- a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kuscia_client.go +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kuscia_client.go @@ -36,9 +36,15 @@ type KusciaV1alpha1Interface interface { DomainRoutesGetter GatewaysGetter InteropConfigsGetter + KusciaBetaDeploymentsGetter + KusciaBetaJobsGetter + KusciaBetaTasksGetter KusciaDeploymentsGetter + KusciaDeploymentSummariesGetter KusciaJobsGetter + KusciaJobSummariesGetter KusciaTasksGetter + KusciaTaskSummariesGetter TaskResourcesGetter TaskResourceGroupsGetter } @@ -88,18 +94,42 @@ func (c *KusciaV1alpha1Client) InteropConfigs() InteropConfigInterface { return newInteropConfigs(c) } +func (c *KusciaV1alpha1Client) KusciaBetaDeployments(namespace string) KusciaBetaDeploymentInterface { + return newKusciaBetaDeployments(c, namespace) +} + +func (c *KusciaV1alpha1Client) KusciaBetaJobs(namespace string) KusciaBetaJobInterface { + return newKusciaBetaJobs(c, namespace) +} + +func (c *KusciaV1alpha1Client) KusciaBetaTasks(namespace string) KusciaBetaTaskInterface { + return newKusciaBetaTasks(c, namespace) +} + func (c *KusciaV1alpha1Client) KusciaDeployments() KusciaDeploymentInterface { return newKusciaDeployments(c) } +func (c *KusciaV1alpha1Client) KusciaDeploymentSummaries(namespace string) KusciaDeploymentSummaryInterface { + return newKusciaDeploymentSummaries(c, namespace) +} + func (c *KusciaV1alpha1Client) KusciaJobs() KusciaJobInterface { return newKusciaJobs(c) } +func (c *KusciaV1alpha1Client) KusciaJobSummaries(namespace string) KusciaJobSummaryInterface { + return newKusciaJobSummaries(c, namespace) +} + func (c *KusciaV1alpha1Client) KusciaTasks() KusciaTaskInterface { return newKusciaTasks(c) } +func (c *KusciaV1alpha1Client) KusciaTaskSummaries(namespace string) KusciaTaskSummaryInterface { + return newKusciaTaskSummaries(c, namespace) +} + func (c *KusciaV1alpha1Client) TaskResources(namespace string) TaskResourceInterface { return newTaskResources(c, namespace) } diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetadeployment.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetadeployment.go new file mode 100644 index 00000000..42d79c39 --- /dev/null +++ 
b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetadeployment.go @@ -0,0 +1,193 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + scheme "github.com/secretflow/kuscia/pkg/crd/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KusciaBetaDeploymentsGetter has a method to return a KusciaBetaDeploymentInterface. +// A group's client should implement this interface. +type KusciaBetaDeploymentsGetter interface { + KusciaBetaDeployments(namespace string) KusciaBetaDeploymentInterface +} + +// KusciaBetaDeploymentInterface has methods to work with KusciaBetaDeployment resources. +type KusciaBetaDeploymentInterface interface { + Create(ctx context.Context, kusciaBetaDeployment *v1alpha1.KusciaBetaDeployment, opts v1.CreateOptions) (*v1alpha1.KusciaBetaDeployment, error) + Update(ctx context.Context, kusciaBetaDeployment *v1alpha1.KusciaBetaDeployment, opts v1.UpdateOptions) (*v1alpha1.KusciaBetaDeployment, error) + UpdateStatus(ctx context.Context, kusciaBetaDeployment *v1alpha1.KusciaBetaDeployment, opts v1.UpdateOptions) (*v1alpha1.KusciaBetaDeployment, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.KusciaBetaDeployment, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.KusciaBetaDeploymentList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaBetaDeployment, err error) + KusciaBetaDeploymentExpansion +} + +// kusciaBetaDeployments implements KusciaBetaDeploymentInterface +type kusciaBetaDeployments struct { + client rest.Interface + ns string +} + +// newKusciaBetaDeployments returns a KusciaBetaDeployments +func newKusciaBetaDeployments(c *KusciaV1alpha1Client, namespace string) *kusciaBetaDeployments { + return &kusciaBetaDeployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kusciaBetaDeployment, and returns the corresponding kusciaBetaDeployment object, and an error if there is any. +func (c *kusciaBetaDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaBetaDeployment, err error) { + result = &v1alpha1.KusciaBetaDeployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciabetadeployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of KusciaBetaDeployments that match those selectors. +func (c *kusciaBetaDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaBetaDeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.KusciaBetaDeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciabetadeployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kusciaBetaDeployments. +func (c *kusciaBetaDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kusciabetadeployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kusciaBetaDeployment and creates it. Returns the server's representation of the kusciaBetaDeployment, and an error, if there is any. +func (c *kusciaBetaDeployments) Create(ctx context.Context, kusciaBetaDeployment *v1alpha1.KusciaBetaDeployment, opts v1.CreateOptions) (result *v1alpha1.KusciaBetaDeployment, err error) { + result = &v1alpha1.KusciaBetaDeployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kusciabetadeployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaBetaDeployment). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kusciaBetaDeployment and updates it. Returns the server's representation of the kusciaBetaDeployment, and an error, if there is any. +func (c *kusciaBetaDeployments) Update(ctx context.Context, kusciaBetaDeployment *v1alpha1.KusciaBetaDeployment, opts v1.UpdateOptions) (result *v1alpha1.KusciaBetaDeployment, err error) { + result = &v1alpha1.KusciaBetaDeployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciabetadeployments"). + Name(kusciaBetaDeployment.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaBetaDeployment). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kusciaBetaDeployments) UpdateStatus(ctx context.Context, kusciaBetaDeployment *v1alpha1.KusciaBetaDeployment, opts v1.UpdateOptions) (result *v1alpha1.KusciaBetaDeployment, err error) { + result = &v1alpha1.KusciaBetaDeployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciabetadeployments"). + Name(kusciaBetaDeployment.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaBetaDeployment). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kusciaBetaDeployment and deletes it. Returns an error if one occurs. +func (c *kusciaBetaDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciabetadeployments"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *kusciaBetaDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciabetadeployments"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kusciaBetaDeployment. +func (c *kusciaBetaDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaBetaDeployment, err error) { + result = &v1alpha1.KusciaBetaDeployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kusciabetadeployments"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetajob.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetajob.go new file mode 100644 index 00000000..7b00db8e --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetajob.go @@ -0,0 +1,193 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + scheme "github.com/secretflow/kuscia/pkg/crd/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KusciaBetaJobsGetter has a method to return a KusciaBetaJobInterface. +// A group's client should implement this interface. +type KusciaBetaJobsGetter interface { + KusciaBetaJobs(namespace string) KusciaBetaJobInterface +} + +// KusciaBetaJobInterface has methods to work with KusciaBetaJob resources. 
+type KusciaBetaJobInterface interface { + Create(ctx context.Context, kusciaBetaJob *v1alpha1.KusciaBetaJob, opts v1.CreateOptions) (*v1alpha1.KusciaBetaJob, error) + Update(ctx context.Context, kusciaBetaJob *v1alpha1.KusciaBetaJob, opts v1.UpdateOptions) (*v1alpha1.KusciaBetaJob, error) + UpdateStatus(ctx context.Context, kusciaBetaJob *v1alpha1.KusciaBetaJob, opts v1.UpdateOptions) (*v1alpha1.KusciaBetaJob, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.KusciaBetaJob, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.KusciaBetaJobList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaBetaJob, err error) + KusciaBetaJobExpansion +} + +// kusciaBetaJobs implements KusciaBetaJobInterface +type kusciaBetaJobs struct { + client rest.Interface + ns string +} + +// newKusciaBetaJobs returns a KusciaBetaJobs +func newKusciaBetaJobs(c *KusciaV1alpha1Client, namespace string) *kusciaBetaJobs { + return &kusciaBetaJobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kusciaBetaJob, and returns the corresponding kusciaBetaJob object, and an error if there is any. +func (c *kusciaBetaJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaBetaJob, err error) { + result = &v1alpha1.KusciaBetaJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciabetajobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KusciaBetaJobs that match those selectors. +func (c *kusciaBetaJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaBetaJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.KusciaBetaJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciabetajobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kusciaBetaJobs. +func (c *kusciaBetaJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kusciabetajobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kusciaBetaJob and creates it. Returns the server's representation of the kusciaBetaJob, and an error, if there is any. +func (c *kusciaBetaJobs) Create(ctx context.Context, kusciaBetaJob *v1alpha1.KusciaBetaJob, opts v1.CreateOptions) (result *v1alpha1.KusciaBetaJob, err error) { + result = &v1alpha1.KusciaBetaJob{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kusciabetajobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaBetaJob). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kusciaBetaJob and updates it. 
Returns the server's representation of the kusciaBetaJob, and an error, if there is any. +func (c *kusciaBetaJobs) Update(ctx context.Context, kusciaBetaJob *v1alpha1.KusciaBetaJob, opts v1.UpdateOptions) (result *v1alpha1.KusciaBetaJob, err error) { + result = &v1alpha1.KusciaBetaJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciabetajobs"). + Name(kusciaBetaJob.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaBetaJob). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kusciaBetaJobs) UpdateStatus(ctx context.Context, kusciaBetaJob *v1alpha1.KusciaBetaJob, opts v1.UpdateOptions) (result *v1alpha1.KusciaBetaJob, err error) { + result = &v1alpha1.KusciaBetaJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciabetajobs"). + Name(kusciaBetaJob.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaBetaJob). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kusciaBetaJob and deletes it. Returns an error if one occurs. +func (c *kusciaBetaJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciabetajobs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kusciaBetaJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciabetajobs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kusciaBetaJob. +func (c *kusciaBetaJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaBetaJob, err error) { + result = &v1alpha1.KusciaBetaJob{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kusciabetajobs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetatask.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetatask.go new file mode 100644 index 00000000..62ed6e7c --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciabetatask.go @@ -0,0 +1,193 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
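// Editor's aside, not part of the generated files: a brief sketch of how the
// new namespaced typed clients (KusciaBetaDeployments, KusciaBetaJobs, ...)
// might be consumed. It assumes the versioned clientset under
// pkg/crd/clientset/versioned with the usual NewForConfig constructor; the
// kubeconfig path and the "alice" namespace are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	kusciaclientset "github.com/secretflow/kuscia/pkg/crd/clientset/versioned"
)

func main() {
	// Build a rest.Config from a kubeconfig and create the generated Kuscia clientset.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	client, err := kusciaclientset.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Unlike KusciaJobs(), the beta resources are namespaced, so a domain
	// namespace is passed to the getter before listing.
	jobs, err := client.KusciaV1alpha1().KusciaBetaJobs("alice").
		List(context.Background(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, job := range jobs.Items {
		fmt.Println(job.Name)
	}
}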
+ +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + scheme "github.com/secretflow/kuscia/pkg/crd/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KusciaBetaTasksGetter has a method to return a KusciaBetaTaskInterface. +// A group's client should implement this interface. +type KusciaBetaTasksGetter interface { + KusciaBetaTasks(namespace string) KusciaBetaTaskInterface +} + +// KusciaBetaTaskInterface has methods to work with KusciaBetaTask resources. +type KusciaBetaTaskInterface interface { + Create(ctx context.Context, kusciaBetaTask *v1alpha1.KusciaBetaTask, opts v1.CreateOptions) (*v1alpha1.KusciaBetaTask, error) + Update(ctx context.Context, kusciaBetaTask *v1alpha1.KusciaBetaTask, opts v1.UpdateOptions) (*v1alpha1.KusciaBetaTask, error) + UpdateStatus(ctx context.Context, kusciaBetaTask *v1alpha1.KusciaBetaTask, opts v1.UpdateOptions) (*v1alpha1.KusciaBetaTask, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.KusciaBetaTask, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.KusciaBetaTaskList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaBetaTask, err error) + KusciaBetaTaskExpansion +} + +// kusciaBetaTasks implements KusciaBetaTaskInterface +type kusciaBetaTasks struct { + client rest.Interface + ns string +} + +// newKusciaBetaTasks returns a KusciaBetaTasks +func newKusciaBetaTasks(c *KusciaV1alpha1Client, namespace string) *kusciaBetaTasks { + return &kusciaBetaTasks{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kusciaBetaTask, and returns the corresponding kusciaBetaTask object, and an error if there is any. +func (c *kusciaBetaTasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaBetaTask, err error) { + result = &v1alpha1.KusciaBetaTask{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciabetatasks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KusciaBetaTasks that match those selectors. +func (c *kusciaBetaTasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaBetaTaskList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.KusciaBetaTaskList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciabetatasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kusciaBetaTasks. +func (c *kusciaBetaTasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kusciabetatasks"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kusciaBetaTask and creates it. Returns the server's representation of the kusciaBetaTask, and an error, if there is any. +func (c *kusciaBetaTasks) Create(ctx context.Context, kusciaBetaTask *v1alpha1.KusciaBetaTask, opts v1.CreateOptions) (result *v1alpha1.KusciaBetaTask, err error) { + result = &v1alpha1.KusciaBetaTask{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kusciabetatasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaBetaTask). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kusciaBetaTask and updates it. Returns the server's representation of the kusciaBetaTask, and an error, if there is any. +func (c *kusciaBetaTasks) Update(ctx context.Context, kusciaBetaTask *v1alpha1.KusciaBetaTask, opts v1.UpdateOptions) (result *v1alpha1.KusciaBetaTask, err error) { + result = &v1alpha1.KusciaBetaTask{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciabetatasks"). + Name(kusciaBetaTask.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaBetaTask). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kusciaBetaTasks) UpdateStatus(ctx context.Context, kusciaBetaTask *v1alpha1.KusciaBetaTask, opts v1.UpdateOptions) (result *v1alpha1.KusciaBetaTask, err error) { + result = &v1alpha1.KusciaBetaTask{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciabetatasks"). + Name(kusciaBetaTask.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaBetaTask). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kusciaBetaTask and deletes it. Returns an error if one occurs. +func (c *kusciaBetaTasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciabetatasks"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kusciaBetaTasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciabetatasks"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kusciaBetaTask. +func (c *kusciaBetaTasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaBetaTask, err error) { + result = &v1alpha1.KusciaBetaTask{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kusciabetatasks"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciadeploymentsummary.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciadeploymentsummary.go new file mode 100644 index 00000000..4a8705cc --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciadeploymentsummary.go @@ -0,0 +1,193 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + scheme "github.com/secretflow/kuscia/pkg/crd/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KusciaDeploymentSummariesGetter has a method to return a KusciaDeploymentSummaryInterface. +// A group's client should implement this interface. +type KusciaDeploymentSummariesGetter interface { + KusciaDeploymentSummaries(namespace string) KusciaDeploymentSummaryInterface +} + +// KusciaDeploymentSummaryInterface has methods to work with KusciaDeploymentSummary resources. +type KusciaDeploymentSummaryInterface interface { + Create(ctx context.Context, kusciaDeploymentSummary *v1alpha1.KusciaDeploymentSummary, opts v1.CreateOptions) (*v1alpha1.KusciaDeploymentSummary, error) + Update(ctx context.Context, kusciaDeploymentSummary *v1alpha1.KusciaDeploymentSummary, opts v1.UpdateOptions) (*v1alpha1.KusciaDeploymentSummary, error) + UpdateStatus(ctx context.Context, kusciaDeploymentSummary *v1alpha1.KusciaDeploymentSummary, opts v1.UpdateOptions) (*v1alpha1.KusciaDeploymentSummary, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.KusciaDeploymentSummary, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.KusciaDeploymentSummaryList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaDeploymentSummary, err error) + KusciaDeploymentSummaryExpansion +} + +// kusciaDeploymentSummaries implements KusciaDeploymentSummaryInterface +type kusciaDeploymentSummaries struct { + client rest.Interface + ns string +} + +// newKusciaDeploymentSummaries returns a KusciaDeploymentSummaries +func newKusciaDeploymentSummaries(c *KusciaV1alpha1Client, namespace string) *kusciaDeploymentSummaries { + return &kusciaDeploymentSummaries{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kusciaDeploymentSummary, and returns the corresponding kusciaDeploymentSummary object, and an error if there is any. 
+func (c *kusciaDeploymentSummaries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaDeploymentSummary, err error) { + result = &v1alpha1.KusciaDeploymentSummary{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciadeploymentsummaries"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KusciaDeploymentSummaries that match those selectors. +func (c *kusciaDeploymentSummaries) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaDeploymentSummaryList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.KusciaDeploymentSummaryList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciadeploymentsummaries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kusciaDeploymentSummaries. +func (c *kusciaDeploymentSummaries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kusciadeploymentsummaries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kusciaDeploymentSummary and creates it. Returns the server's representation of the kusciaDeploymentSummary, and an error, if there is any. +func (c *kusciaDeploymentSummaries) Create(ctx context.Context, kusciaDeploymentSummary *v1alpha1.KusciaDeploymentSummary, opts v1.CreateOptions) (result *v1alpha1.KusciaDeploymentSummary, err error) { + result = &v1alpha1.KusciaDeploymentSummary{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kusciadeploymentsummaries"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaDeploymentSummary). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kusciaDeploymentSummary and updates it. Returns the server's representation of the kusciaDeploymentSummary, and an error, if there is any. +func (c *kusciaDeploymentSummaries) Update(ctx context.Context, kusciaDeploymentSummary *v1alpha1.KusciaDeploymentSummary, opts v1.UpdateOptions) (result *v1alpha1.KusciaDeploymentSummary, err error) { + result = &v1alpha1.KusciaDeploymentSummary{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciadeploymentsummaries"). + Name(kusciaDeploymentSummary.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaDeploymentSummary). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kusciaDeploymentSummaries) UpdateStatus(ctx context.Context, kusciaDeploymentSummary *v1alpha1.KusciaDeploymentSummary, opts v1.UpdateOptions) (result *v1alpha1.KusciaDeploymentSummary, err error) { + result = &v1alpha1.KusciaDeploymentSummary{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciadeploymentsummaries"). + Name(kusciaDeploymentSummary.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaDeploymentSummary). + Do(ctx). 
+ Into(result) + return +} + +// Delete takes name of the kusciaDeploymentSummary and deletes it. Returns an error if one occurs. +func (c *kusciaDeploymentSummaries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciadeploymentsummaries"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kusciaDeploymentSummaries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciadeploymentsummaries"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kusciaDeploymentSummary. +func (c *kusciaDeploymentSummaries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaDeploymentSummary, err error) { + result = &v1alpha1.KusciaDeploymentSummary{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kusciadeploymentsummaries"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciajobsummary.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciajobsummary.go new file mode 100644 index 00000000..bc0baaa0 --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciajobsummary.go @@ -0,0 +1,193 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + scheme "github.com/secretflow/kuscia/pkg/crd/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KusciaJobSummariesGetter has a method to return a KusciaJobSummaryInterface. +// A group's client should implement this interface. +type KusciaJobSummariesGetter interface { + KusciaJobSummaries(namespace string) KusciaJobSummaryInterface +} + +// KusciaJobSummaryInterface has methods to work with KusciaJobSummary resources. 
+type KusciaJobSummaryInterface interface { + Create(ctx context.Context, kusciaJobSummary *v1alpha1.KusciaJobSummary, opts v1.CreateOptions) (*v1alpha1.KusciaJobSummary, error) + Update(ctx context.Context, kusciaJobSummary *v1alpha1.KusciaJobSummary, opts v1.UpdateOptions) (*v1alpha1.KusciaJobSummary, error) + UpdateStatus(ctx context.Context, kusciaJobSummary *v1alpha1.KusciaJobSummary, opts v1.UpdateOptions) (*v1alpha1.KusciaJobSummary, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.KusciaJobSummary, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.KusciaJobSummaryList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaJobSummary, err error) + KusciaJobSummaryExpansion +} + +// kusciaJobSummaries implements KusciaJobSummaryInterface +type kusciaJobSummaries struct { + client rest.Interface + ns string +} + +// newKusciaJobSummaries returns a KusciaJobSummaries +func newKusciaJobSummaries(c *KusciaV1alpha1Client, namespace string) *kusciaJobSummaries { + return &kusciaJobSummaries{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kusciaJobSummary, and returns the corresponding kusciaJobSummary object, and an error if there is any. +func (c *kusciaJobSummaries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaJobSummary, err error) { + result = &v1alpha1.KusciaJobSummary{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciajobsummaries"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KusciaJobSummaries that match those selectors. +func (c *kusciaJobSummaries) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaJobSummaryList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.KusciaJobSummaryList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciajobsummaries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kusciaJobSummaries. +func (c *kusciaJobSummaries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kusciajobsummaries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kusciaJobSummary and creates it. Returns the server's representation of the kusciaJobSummary, and an error, if there is any. +func (c *kusciaJobSummaries) Create(ctx context.Context, kusciaJobSummary *v1alpha1.KusciaJobSummary, opts v1.CreateOptions) (result *v1alpha1.KusciaJobSummary, err error) { + result = &v1alpha1.KusciaJobSummary{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kusciajobsummaries"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(kusciaJobSummary). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kusciaJobSummary and updates it. Returns the server's representation of the kusciaJobSummary, and an error, if there is any. +func (c *kusciaJobSummaries) Update(ctx context.Context, kusciaJobSummary *v1alpha1.KusciaJobSummary, opts v1.UpdateOptions) (result *v1alpha1.KusciaJobSummary, err error) { + result = &v1alpha1.KusciaJobSummary{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciajobsummaries"). + Name(kusciaJobSummary.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaJobSummary). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kusciaJobSummaries) UpdateStatus(ctx context.Context, kusciaJobSummary *v1alpha1.KusciaJobSummary, opts v1.UpdateOptions) (result *v1alpha1.KusciaJobSummary, err error) { + result = &v1alpha1.KusciaJobSummary{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciajobsummaries"). + Name(kusciaJobSummary.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaJobSummary). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kusciaJobSummary and deletes it. Returns an error if one occurs. +func (c *kusciaJobSummaries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciajobsummaries"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kusciaJobSummaries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciajobsummaries"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kusciaJobSummary. +func (c *kusciaJobSummaries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaJobSummary, err error) { + result = &v1alpha1.KusciaJobSummary{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kusciajobsummaries"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciatasksummary.go b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciatasksummary.go new file mode 100644 index 00000000..51213ae6 --- /dev/null +++ b/pkg/crd/clientset/versioned/typed/kuscia/v1alpha1/kusciatasksummary.go @@ -0,0 +1,193 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + scheme "github.com/secretflow/kuscia/pkg/crd/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KusciaTaskSummariesGetter has a method to return a KusciaTaskSummaryInterface. +// A group's client should implement this interface. +type KusciaTaskSummariesGetter interface { + KusciaTaskSummaries(namespace string) KusciaTaskSummaryInterface +} + +// KusciaTaskSummaryInterface has methods to work with KusciaTaskSummary resources. +type KusciaTaskSummaryInterface interface { + Create(ctx context.Context, kusciaTaskSummary *v1alpha1.KusciaTaskSummary, opts v1.CreateOptions) (*v1alpha1.KusciaTaskSummary, error) + Update(ctx context.Context, kusciaTaskSummary *v1alpha1.KusciaTaskSummary, opts v1.UpdateOptions) (*v1alpha1.KusciaTaskSummary, error) + UpdateStatus(ctx context.Context, kusciaTaskSummary *v1alpha1.KusciaTaskSummary, opts v1.UpdateOptions) (*v1alpha1.KusciaTaskSummary, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.KusciaTaskSummary, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.KusciaTaskSummaryList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaTaskSummary, err error) + KusciaTaskSummaryExpansion +} + +// kusciaTaskSummaries implements KusciaTaskSummaryInterface +type kusciaTaskSummaries struct { + client rest.Interface + ns string +} + +// newKusciaTaskSummaries returns a KusciaTaskSummaries +func newKusciaTaskSummaries(c *KusciaV1alpha1Client, namespace string) *kusciaTaskSummaries { + return &kusciaTaskSummaries{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kusciaTaskSummary, and returns the corresponding kusciaTaskSummary object, and an error if there is any. +func (c *kusciaTaskSummaries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KusciaTaskSummary, err error) { + result = &v1alpha1.KusciaTaskSummary{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciatasksummaries"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KusciaTaskSummaries that match those selectors. +func (c *kusciaTaskSummaries) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KusciaTaskSummaryList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.KusciaTaskSummaryList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kusciatasksummaries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kusciaTaskSummaries. 
+func (c *kusciaTaskSummaries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kusciatasksummaries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kusciaTaskSummary and creates it. Returns the server's representation of the kusciaTaskSummary, and an error, if there is any. +func (c *kusciaTaskSummaries) Create(ctx context.Context, kusciaTaskSummary *v1alpha1.KusciaTaskSummary, opts v1.CreateOptions) (result *v1alpha1.KusciaTaskSummary, err error) { + result = &v1alpha1.KusciaTaskSummary{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kusciatasksummaries"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaTaskSummary). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kusciaTaskSummary and updates it. Returns the server's representation of the kusciaTaskSummary, and an error, if there is any. +func (c *kusciaTaskSummaries) Update(ctx context.Context, kusciaTaskSummary *v1alpha1.KusciaTaskSummary, opts v1.UpdateOptions) (result *v1alpha1.KusciaTaskSummary, err error) { + result = &v1alpha1.KusciaTaskSummary{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciatasksummaries"). + Name(kusciaTaskSummary.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaTaskSummary). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kusciaTaskSummaries) UpdateStatus(ctx context.Context, kusciaTaskSummary *v1alpha1.KusciaTaskSummary, opts v1.UpdateOptions) (result *v1alpha1.KusciaTaskSummary, err error) { + result = &v1alpha1.KusciaTaskSummary{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kusciatasksummaries"). + Name(kusciaTaskSummary.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kusciaTaskSummary). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kusciaTaskSummary and deletes it. Returns an error if one occurs. +func (c *kusciaTaskSummaries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciatasksummaries"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kusciaTaskSummaries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("kusciatasksummaries"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kusciaTaskSummary. +func (c *kusciaTaskSummaries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KusciaTaskSummary, err error) { + result = &v1alpha1.KusciaTaskSummary{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kusciatasksummaries"). + Name(name). + SubResource(subresources...). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/crd/informers/externalversions/generic.go b/pkg/crd/informers/externalversions/generic.go index 25ebf91d..c4396da6 100644 --- a/pkg/crd/informers/externalversions/generic.go +++ b/pkg/crd/informers/externalversions/generic.go @@ -71,12 +71,24 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().Gateways().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("interopconfigs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().InteropConfigs().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("kusciabetadeployments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().KusciaBetaDeployments().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("kusciabetajobs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().KusciaBetaJobs().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("kusciabetatasks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().KusciaBetaTasks().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("kusciadeployments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().KusciaDeployments().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("kusciadeploymentsummaries"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().KusciaDeploymentSummaries().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("kusciajobs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().KusciaJobs().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("kusciajobsummaries"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().KusciaJobSummaries().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("kusciatasks"): return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().KusciaTasks().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("kusciatasksummaries"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().KusciaTaskSummaries().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("taskresources"): return &genericInformer{resource: resource.GroupResource(), informer: f.Kuscia().V1alpha1().TaskResources().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("taskresourcegroups"): diff --git a/pkg/crd/informers/externalversions/kuscia/v1alpha1/interface.go b/pkg/crd/informers/externalversions/kuscia/v1alpha1/interface.go index 838829bd..e162b3f2 100644 --- a/pkg/crd/informers/externalversions/kuscia/v1alpha1/interface.go +++ b/pkg/crd/informers/externalversions/kuscia/v1alpha1/interface.go @@ -42,12 +42,24 @@ type Interface interface { Gateways() GatewayInformer // InteropConfigs returns a InteropConfigInformer. InteropConfigs() InteropConfigInformer + // KusciaBetaDeployments returns a KusciaBetaDeploymentInformer. + KusciaBetaDeployments() KusciaBetaDeploymentInformer + // KusciaBetaJobs returns a KusciaBetaJobInformer. + KusciaBetaJobs() KusciaBetaJobInformer + // KusciaBetaTasks returns a KusciaBetaTaskInformer. 
+ KusciaBetaTasks() KusciaBetaTaskInformer // KusciaDeployments returns a KusciaDeploymentInformer. KusciaDeployments() KusciaDeploymentInformer + // KusciaDeploymentSummaries returns a KusciaDeploymentSummaryInformer. + KusciaDeploymentSummaries() KusciaDeploymentSummaryInformer // KusciaJobs returns a KusciaJobInformer. KusciaJobs() KusciaJobInformer + // KusciaJobSummaries returns a KusciaJobSummaryInformer. + KusciaJobSummaries() KusciaJobSummaryInformer // KusciaTasks returns a KusciaTaskInformer. KusciaTasks() KusciaTaskInformer + // KusciaTaskSummaries returns a KusciaTaskSummaryInformer. + KusciaTaskSummaries() KusciaTaskSummaryInformer // TaskResources returns a TaskResourceInformer. TaskResources() TaskResourceInformer // TaskResourceGroups returns a TaskResourceGroupInformer. @@ -115,21 +127,51 @@ func (v *version) InteropConfigs() InteropConfigInformer { return &interopConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// KusciaBetaDeployments returns a KusciaBetaDeploymentInformer. +func (v *version) KusciaBetaDeployments() KusciaBetaDeploymentInformer { + return &kusciaBetaDeploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// KusciaBetaJobs returns a KusciaBetaJobInformer. +func (v *version) KusciaBetaJobs() KusciaBetaJobInformer { + return &kusciaBetaJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// KusciaBetaTasks returns a KusciaBetaTaskInformer. +func (v *version) KusciaBetaTasks() KusciaBetaTaskInformer { + return &kusciaBetaTaskInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // KusciaDeployments returns a KusciaDeploymentInformer. func (v *version) KusciaDeployments() KusciaDeploymentInformer { return &kusciaDeploymentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// KusciaDeploymentSummaries returns a KusciaDeploymentSummaryInformer. +func (v *version) KusciaDeploymentSummaries() KusciaDeploymentSummaryInformer { + return &kusciaDeploymentSummaryInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // KusciaJobs returns a KusciaJobInformer. func (v *version) KusciaJobs() KusciaJobInformer { return &kusciaJobInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// KusciaJobSummaries returns a KusciaJobSummaryInformer. +func (v *version) KusciaJobSummaries() KusciaJobSummaryInformer { + return &kusciaJobSummaryInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // KusciaTasks returns a KusciaTaskInformer. func (v *version) KusciaTasks() KusciaTaskInformer { return &kusciaTaskInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// KusciaTaskSummaries returns a KusciaTaskSummaryInformer. +func (v *version) KusciaTaskSummaries() KusciaTaskSummaryInformer { + return &kusciaTaskSummaryInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // TaskResources returns a TaskResourceInformer. 
func (v *version) TaskResources() TaskResourceInformer { return &taskResourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetadeployment.go b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetadeployment.go new file mode 100644 index 00000000..b2ed47f7 --- /dev/null +++ b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetadeployment.go @@ -0,0 +1,88 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + kusciav1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + versioned "github.com/secretflow/kuscia/pkg/crd/clientset/versioned" + internalinterfaces "github.com/secretflow/kuscia/pkg/crd/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/listers/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// KusciaBetaDeploymentInformer provides access to a shared informer and lister for +// KusciaBetaDeployments. +type KusciaBetaDeploymentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.KusciaBetaDeploymentLister +} + +type kusciaBetaDeploymentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKusciaBetaDeploymentInformer constructs a new informer for KusciaBetaDeployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewKusciaBetaDeploymentInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKusciaBetaDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKusciaBetaDeploymentInformer constructs a new informer for KusciaBetaDeployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredKusciaBetaDeploymentInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaBetaDeployments(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaBetaDeployments(namespace).Watch(context.TODO(), options) + }, + }, + &kusciav1alpha1.KusciaBetaDeployment{}, + resyncPeriod, + indexers, + ) +} + +func (f *kusciaBetaDeploymentInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKusciaBetaDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kusciaBetaDeploymentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kusciav1alpha1.KusciaBetaDeployment{}, f.defaultInformer) +} + +func (f *kusciaBetaDeploymentInformer) Lister() v1alpha1.KusciaBetaDeploymentLister { + return v1alpha1.NewKusciaBetaDeploymentLister(f.Informer().GetIndexer()) +} diff --git a/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetajob.go b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetajob.go new file mode 100644 index 00000000..63596200 --- /dev/null +++ b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetajob.go @@ -0,0 +1,88 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + kusciav1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + versioned "github.com/secretflow/kuscia/pkg/crd/clientset/versioned" + internalinterfaces "github.com/secretflow/kuscia/pkg/crd/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/listers/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// KusciaBetaJobInformer provides access to a shared informer and lister for +// KusciaBetaJobs. +type KusciaBetaJobInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.KusciaBetaJobLister +} + +type kusciaBetaJobInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKusciaBetaJobInformer constructs a new informer for KusciaBetaJob type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewKusciaBetaJobInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKusciaBetaJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKusciaBetaJobInformer constructs a new informer for KusciaBetaJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredKusciaBetaJobInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaBetaJobs(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaBetaJobs(namespace).Watch(context.TODO(), options) + }, + }, + &kusciav1alpha1.KusciaBetaJob{}, + resyncPeriod, + indexers, + ) +} + +func (f *kusciaBetaJobInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKusciaBetaJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kusciaBetaJobInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kusciav1alpha1.KusciaBetaJob{}, f.defaultInformer) +} + +func (f *kusciaBetaJobInformer) Lister() v1alpha1.KusciaBetaJobLister { + return v1alpha1.NewKusciaBetaJobLister(f.Informer().GetIndexer()) +} diff --git a/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetatask.go b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetatask.go new file mode 100644 index 00000000..3d6838d8 --- /dev/null +++ b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciabetatask.go @@ -0,0 +1,88 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + time "time" + + kusciav1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + versioned "github.com/secretflow/kuscia/pkg/crd/clientset/versioned" + internalinterfaces "github.com/secretflow/kuscia/pkg/crd/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/listers/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// KusciaBetaTaskInformer provides access to a shared informer and lister for +// KusciaBetaTasks. +type KusciaBetaTaskInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.KusciaBetaTaskLister +} + +type kusciaBetaTaskInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKusciaBetaTaskInformer constructs a new informer for KusciaBetaTask type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewKusciaBetaTaskInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKusciaBetaTaskInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKusciaBetaTaskInformer constructs a new informer for KusciaBetaTask type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredKusciaBetaTaskInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaBetaTasks(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaBetaTasks(namespace).Watch(context.TODO(), options) + }, + }, + &kusciav1alpha1.KusciaBetaTask{}, + resyncPeriod, + indexers, + ) +} + +func (f *kusciaBetaTaskInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKusciaBetaTaskInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kusciaBetaTaskInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kusciav1alpha1.KusciaBetaTask{}, f.defaultInformer) +} + +func (f *kusciaBetaTaskInformer) Lister() v1alpha1.KusciaBetaTaskLister { + return v1alpha1.NewKusciaBetaTaskLister(f.Informer().GetIndexer()) +} diff --git a/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciadeploymentsummary.go b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciadeploymentsummary.go new file mode 100644 index 00000000..bfe7a68f --- /dev/null +++ b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciadeploymentsummary.go @@ -0,0 +1,88 @@ +// Copyright 2023 Ant Group Co., Ltd. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + kusciav1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + versioned "github.com/secretflow/kuscia/pkg/crd/clientset/versioned" + internalinterfaces "github.com/secretflow/kuscia/pkg/crd/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/listers/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// KusciaDeploymentSummaryInformer provides access to a shared informer and lister for +// KusciaDeploymentSummaries. +type KusciaDeploymentSummaryInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.KusciaDeploymentSummaryLister +} + +type kusciaDeploymentSummaryInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKusciaDeploymentSummaryInformer constructs a new informer for KusciaDeploymentSummary type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewKusciaDeploymentSummaryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKusciaDeploymentSummaryInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKusciaDeploymentSummaryInformer constructs a new informer for KusciaDeploymentSummary type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredKusciaDeploymentSummaryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaDeploymentSummaries(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaDeploymentSummaries(namespace).Watch(context.TODO(), options) + }, + }, + &kusciav1alpha1.KusciaDeploymentSummary{}, + resyncPeriod, + indexers, + ) +} + +func (f *kusciaDeploymentSummaryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKusciaDeploymentSummaryInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kusciaDeploymentSummaryInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kusciav1alpha1.KusciaDeploymentSummary{}, f.defaultInformer) +} + +func (f *kusciaDeploymentSummaryInformer) Lister() v1alpha1.KusciaDeploymentSummaryLister { + return v1alpha1.NewKusciaDeploymentSummaryLister(f.Informer().GetIndexer()) +} diff --git a/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciajobsummary.go b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciajobsummary.go new file mode 100644 index 00000000..cc90c6de --- /dev/null +++ b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciajobsummary.go @@ -0,0 +1,88 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + kusciav1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + versioned "github.com/secretflow/kuscia/pkg/crd/clientset/versioned" + internalinterfaces "github.com/secretflow/kuscia/pkg/crd/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/listers/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// KusciaJobSummaryInformer provides access to a shared informer and lister for +// KusciaJobSummaries. +type KusciaJobSummaryInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.KusciaJobSummaryLister +} + +type kusciaJobSummaryInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKusciaJobSummaryInformer constructs a new informer for KusciaJobSummary type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewKusciaJobSummaryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKusciaJobSummaryInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKusciaJobSummaryInformer constructs a new informer for KusciaJobSummary type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredKusciaJobSummaryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaJobSummaries(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaJobSummaries(namespace).Watch(context.TODO(), options) + }, + }, + &kusciav1alpha1.KusciaJobSummary{}, + resyncPeriod, + indexers, + ) +} + +func (f *kusciaJobSummaryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKusciaJobSummaryInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kusciaJobSummaryInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kusciav1alpha1.KusciaJobSummary{}, f.defaultInformer) +} + +func (f *kusciaJobSummaryInformer) Lister() v1alpha1.KusciaJobSummaryLister { + return v1alpha1.NewKusciaJobSummaryLister(f.Informer().GetIndexer()) +} diff --git a/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciatasksummary.go b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciatasksummary.go new file mode 100644 index 00000000..7cf8dc57 --- /dev/null +++ b/pkg/crd/informers/externalversions/kuscia/v1alpha1/kusciatasksummary.go @@ -0,0 +1,88 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + time "time" + + kusciav1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + versioned "github.com/secretflow/kuscia/pkg/crd/clientset/versioned" + internalinterfaces "github.com/secretflow/kuscia/pkg/crd/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/listers/kuscia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// KusciaTaskSummaryInformer provides access to a shared informer and lister for +// KusciaTaskSummaries. +type KusciaTaskSummaryInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.KusciaTaskSummaryLister +} + +type kusciaTaskSummaryInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKusciaTaskSummaryInformer constructs a new informer for KusciaTaskSummary type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewKusciaTaskSummaryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKusciaTaskSummaryInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKusciaTaskSummaryInformer constructs a new informer for KusciaTaskSummary type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredKusciaTaskSummaryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaTaskSummaries(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KusciaV1alpha1().KusciaTaskSummaries(namespace).Watch(context.TODO(), options) + }, + }, + &kusciav1alpha1.KusciaTaskSummary{}, + resyncPeriod, + indexers, + ) +} + +func (f *kusciaTaskSummaryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKusciaTaskSummaryInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kusciaTaskSummaryInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kusciav1alpha1.KusciaTaskSummary{}, f.defaultInformer) +} + +func (f *kusciaTaskSummaryInformer) Lister() v1alpha1.KusciaTaskSummaryLister { + return v1alpha1.NewKusciaTaskSummaryLister(f.Informer().GetIndexer()) +} diff --git a/pkg/crd/listers/kuscia/v1alpha1/expansion_generated.go b/pkg/crd/listers/kuscia/v1alpha1/expansion_generated.go index ccf758c9..85cd22e9 100644 --- a/pkg/crd/listers/kuscia/v1alpha1/expansion_generated.go +++ b/pkg/crd/listers/kuscia/v1alpha1/expansion_generated.go @@ -80,18 +80,66 @@ type GatewayNamespaceListerExpansion interface{} // InteropConfigLister. type InteropConfigListerExpansion interface{} +// KusciaBetaDeploymentListerExpansion allows custom methods to be added to +// KusciaBetaDeploymentLister. +type KusciaBetaDeploymentListerExpansion interface{} + +// KusciaBetaDeploymentNamespaceListerExpansion allows custom methods to be added to +// KusciaBetaDeploymentNamespaceLister. +type KusciaBetaDeploymentNamespaceListerExpansion interface{} + +// KusciaBetaJobListerExpansion allows custom methods to be added to +// KusciaBetaJobLister. +type KusciaBetaJobListerExpansion interface{} + +// KusciaBetaJobNamespaceListerExpansion allows custom methods to be added to +// KusciaBetaJobNamespaceLister. +type KusciaBetaJobNamespaceListerExpansion interface{} + +// KusciaBetaTaskListerExpansion allows custom methods to be added to +// KusciaBetaTaskLister. +type KusciaBetaTaskListerExpansion interface{} + +// KusciaBetaTaskNamespaceListerExpansion allows custom methods to be added to +// KusciaBetaTaskNamespaceLister. +type KusciaBetaTaskNamespaceListerExpansion interface{} + // KusciaDeploymentListerExpansion allows custom methods to be added to // KusciaDeploymentLister. type KusciaDeploymentListerExpansion interface{} +// KusciaDeploymentSummaryListerExpansion allows custom methods to be added to +// KusciaDeploymentSummaryLister. +type KusciaDeploymentSummaryListerExpansion interface{} + +// KusciaDeploymentSummaryNamespaceListerExpansion allows custom methods to be added to +// KusciaDeploymentSummaryNamespaceLister. +type KusciaDeploymentSummaryNamespaceListerExpansion interface{} + // KusciaJobListerExpansion allows custom methods to be added to // KusciaJobLister. 
type KusciaJobListerExpansion interface{} +// KusciaJobSummaryListerExpansion allows custom methods to be added to +// KusciaJobSummaryLister. +type KusciaJobSummaryListerExpansion interface{} + +// KusciaJobSummaryNamespaceListerExpansion allows custom methods to be added to +// KusciaJobSummaryNamespaceLister. +type KusciaJobSummaryNamespaceListerExpansion interface{} + // KusciaTaskListerExpansion allows custom methods to be added to // KusciaTaskLister. type KusciaTaskListerExpansion interface{} +// KusciaTaskSummaryListerExpansion allows custom methods to be added to +// KusciaTaskSummaryLister. +type KusciaTaskSummaryListerExpansion interface{} + +// KusciaTaskSummaryNamespaceListerExpansion allows custom methods to be added to +// KusciaTaskSummaryNamespaceLister. +type KusciaTaskSummaryNamespaceListerExpansion interface{} + // TaskResourceListerExpansion allows custom methods to be added to // TaskResourceLister. type TaskResourceListerExpansion interface{} diff --git a/pkg/crd/listers/kuscia/v1alpha1/kusciabetadeployment.go b/pkg/crd/listers/kuscia/v1alpha1/kusciabetadeployment.go new file mode 100644 index 00000000..f163da95 --- /dev/null +++ b/pkg/crd/listers/kuscia/v1alpha1/kusciabetadeployment.go @@ -0,0 +1,97 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// KusciaBetaDeploymentLister helps list KusciaBetaDeployments. +// All objects returned here must be treated as read-only. +type KusciaBetaDeploymentLister interface { + // List lists all KusciaBetaDeployments in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaDeployment, err error) + // KusciaBetaDeployments returns an object that can list and get KusciaBetaDeployments. + KusciaBetaDeployments(namespace string) KusciaBetaDeploymentNamespaceLister + KusciaBetaDeploymentListerExpansion +} + +// kusciaBetaDeploymentLister implements the KusciaBetaDeploymentLister interface. +type kusciaBetaDeploymentLister struct { + indexer cache.Indexer +} + +// NewKusciaBetaDeploymentLister returns a new KusciaBetaDeploymentLister. +func NewKusciaBetaDeploymentLister(indexer cache.Indexer) KusciaBetaDeploymentLister { + return &kusciaBetaDeploymentLister{indexer: indexer} +} + +// List lists all KusciaBetaDeployments in the indexer. +func (s *kusciaBetaDeploymentLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaDeployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaBetaDeployment)) + }) + return ret, err +} + +// KusciaBetaDeployments returns an object that can list and get KusciaBetaDeployments. 
+func (s *kusciaBetaDeploymentLister) KusciaBetaDeployments(namespace string) KusciaBetaDeploymentNamespaceLister { + return kusciaBetaDeploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KusciaBetaDeploymentNamespaceLister helps list and get KusciaBetaDeployments. +// All objects returned here must be treated as read-only. +type KusciaBetaDeploymentNamespaceLister interface { + // List lists all KusciaBetaDeployments in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaDeployment, err error) + // Get retrieves the KusciaBetaDeployment from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.KusciaBetaDeployment, error) + KusciaBetaDeploymentNamespaceListerExpansion +} + +// kusciaBetaDeploymentNamespaceLister implements the KusciaBetaDeploymentNamespaceLister +// interface. +type kusciaBetaDeploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KusciaBetaDeployments in the indexer for a given namespace. +func (s kusciaBetaDeploymentNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaDeployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaBetaDeployment)) + }) + return ret, err +} + +// Get retrieves the KusciaBetaDeployment from the indexer for a given namespace and name. +func (s kusciaBetaDeploymentNamespaceLister) Get(name string) (*v1alpha1.KusciaBetaDeployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("kusciabetadeployment"), name) + } + return obj.(*v1alpha1.KusciaBetaDeployment), nil +} diff --git a/pkg/crd/listers/kuscia/v1alpha1/kusciabetajob.go b/pkg/crd/listers/kuscia/v1alpha1/kusciabetajob.go new file mode 100644 index 00000000..e3818e0c --- /dev/null +++ b/pkg/crd/listers/kuscia/v1alpha1/kusciabetajob.go @@ -0,0 +1,97 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// KusciaBetaJobLister helps list KusciaBetaJobs. +// All objects returned here must be treated as read-only. +type KusciaBetaJobLister interface { + // List lists all KusciaBetaJobs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaJob, err error) + // KusciaBetaJobs returns an object that can list and get KusciaBetaJobs. 
+ KusciaBetaJobs(namespace string) KusciaBetaJobNamespaceLister + KusciaBetaJobListerExpansion +} + +// kusciaBetaJobLister implements the KusciaBetaJobLister interface. +type kusciaBetaJobLister struct { + indexer cache.Indexer +} + +// NewKusciaBetaJobLister returns a new KusciaBetaJobLister. +func NewKusciaBetaJobLister(indexer cache.Indexer) KusciaBetaJobLister { + return &kusciaBetaJobLister{indexer: indexer} +} + +// List lists all KusciaBetaJobs in the indexer. +func (s *kusciaBetaJobLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaJob, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaBetaJob)) + }) + return ret, err +} + +// KusciaBetaJobs returns an object that can list and get KusciaBetaJobs. +func (s *kusciaBetaJobLister) KusciaBetaJobs(namespace string) KusciaBetaJobNamespaceLister { + return kusciaBetaJobNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KusciaBetaJobNamespaceLister helps list and get KusciaBetaJobs. +// All objects returned here must be treated as read-only. +type KusciaBetaJobNamespaceLister interface { + // List lists all KusciaBetaJobs in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaJob, err error) + // Get retrieves the KusciaBetaJob from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.KusciaBetaJob, error) + KusciaBetaJobNamespaceListerExpansion +} + +// kusciaBetaJobNamespaceLister implements the KusciaBetaJobNamespaceLister +// interface. +type kusciaBetaJobNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KusciaBetaJobs in the indexer for a given namespace. +func (s kusciaBetaJobNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaJob, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaBetaJob)) + }) + return ret, err +} + +// Get retrieves the KusciaBetaJob from the indexer for a given namespace and name. +func (s kusciaBetaJobNamespaceLister) Get(name string) (*v1alpha1.KusciaBetaJob, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("kusciabetajob"), name) + } + return obj.(*v1alpha1.KusciaBetaJob), nil +} diff --git a/pkg/crd/listers/kuscia/v1alpha1/kusciabetatask.go b/pkg/crd/listers/kuscia/v1alpha1/kusciabetatask.go new file mode 100644 index 00000000..cf50c4f7 --- /dev/null +++ b/pkg/crd/listers/kuscia/v1alpha1/kusciabetatask.go @@ -0,0 +1,97 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// KusciaBetaTaskLister helps list KusciaBetaTasks. +// All objects returned here must be treated as read-only. +type KusciaBetaTaskLister interface { + // List lists all KusciaBetaTasks in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaTask, err error) + // KusciaBetaTasks returns an object that can list and get KusciaBetaTasks. + KusciaBetaTasks(namespace string) KusciaBetaTaskNamespaceLister + KusciaBetaTaskListerExpansion +} + +// kusciaBetaTaskLister implements the KusciaBetaTaskLister interface. +type kusciaBetaTaskLister struct { + indexer cache.Indexer +} + +// NewKusciaBetaTaskLister returns a new KusciaBetaTaskLister. +func NewKusciaBetaTaskLister(indexer cache.Indexer) KusciaBetaTaskLister { + return &kusciaBetaTaskLister{indexer: indexer} +} + +// List lists all KusciaBetaTasks in the indexer. +func (s *kusciaBetaTaskLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaTask, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaBetaTask)) + }) + return ret, err +} + +// KusciaBetaTasks returns an object that can list and get KusciaBetaTasks. +func (s *kusciaBetaTaskLister) KusciaBetaTasks(namespace string) KusciaBetaTaskNamespaceLister { + return kusciaBetaTaskNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KusciaBetaTaskNamespaceLister helps list and get KusciaBetaTasks. +// All objects returned here must be treated as read-only. +type KusciaBetaTaskNamespaceLister interface { + // List lists all KusciaBetaTasks in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaTask, err error) + // Get retrieves the KusciaBetaTask from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.KusciaBetaTask, error) + KusciaBetaTaskNamespaceListerExpansion +} + +// kusciaBetaTaskNamespaceLister implements the KusciaBetaTaskNamespaceLister +// interface. +type kusciaBetaTaskNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KusciaBetaTasks in the indexer for a given namespace. +func (s kusciaBetaTaskNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaBetaTask, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaBetaTask)) + }) + return ret, err +} + +// Get retrieves the KusciaBetaTask from the indexer for a given namespace and name. +func (s kusciaBetaTaskNamespaceLister) Get(name string) (*v1alpha1.KusciaBetaTask, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("kusciabetatask"), name) + } + return obj.(*v1alpha1.KusciaBetaTask), nil +} diff --git a/pkg/crd/listers/kuscia/v1alpha1/kusciadeploymentsummary.go b/pkg/crd/listers/kuscia/v1alpha1/kusciadeploymentsummary.go new file mode 100644 index 00000000..9e0bb83f --- /dev/null +++ b/pkg/crd/listers/kuscia/v1alpha1/kusciadeploymentsummary.go @@ -0,0 +1,97 @@ +// Copyright 2023 Ant Group Co., Ltd. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// KusciaDeploymentSummaryLister helps list KusciaDeploymentSummaries. +// All objects returned here must be treated as read-only. +type KusciaDeploymentSummaryLister interface { + // List lists all KusciaDeploymentSummaries in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaDeploymentSummary, err error) + // KusciaDeploymentSummaries returns an object that can list and get KusciaDeploymentSummaries. + KusciaDeploymentSummaries(namespace string) KusciaDeploymentSummaryNamespaceLister + KusciaDeploymentSummaryListerExpansion +} + +// kusciaDeploymentSummaryLister implements the KusciaDeploymentSummaryLister interface. +type kusciaDeploymentSummaryLister struct { + indexer cache.Indexer +} + +// NewKusciaDeploymentSummaryLister returns a new KusciaDeploymentSummaryLister. +func NewKusciaDeploymentSummaryLister(indexer cache.Indexer) KusciaDeploymentSummaryLister { + return &kusciaDeploymentSummaryLister{indexer: indexer} +} + +// List lists all KusciaDeploymentSummaries in the indexer. +func (s *kusciaDeploymentSummaryLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaDeploymentSummary, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaDeploymentSummary)) + }) + return ret, err +} + +// KusciaDeploymentSummaries returns an object that can list and get KusciaDeploymentSummaries. +func (s *kusciaDeploymentSummaryLister) KusciaDeploymentSummaries(namespace string) KusciaDeploymentSummaryNamespaceLister { + return kusciaDeploymentSummaryNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KusciaDeploymentSummaryNamespaceLister helps list and get KusciaDeploymentSummaries. +// All objects returned here must be treated as read-only. +type KusciaDeploymentSummaryNamespaceLister interface { + // List lists all KusciaDeploymentSummaries in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaDeploymentSummary, err error) + // Get retrieves the KusciaDeploymentSummary from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.KusciaDeploymentSummary, error) + KusciaDeploymentSummaryNamespaceListerExpansion +} + +// kusciaDeploymentSummaryNamespaceLister implements the KusciaDeploymentSummaryNamespaceLister +// interface. +type kusciaDeploymentSummaryNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KusciaDeploymentSummaries in the indexer for a given namespace. 
+func (s kusciaDeploymentSummaryNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaDeploymentSummary, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaDeploymentSummary)) + }) + return ret, err +} + +// Get retrieves the KusciaDeploymentSummary from the indexer for a given namespace and name. +func (s kusciaDeploymentSummaryNamespaceLister) Get(name string) (*v1alpha1.KusciaDeploymentSummary, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("kusciadeploymentsummary"), name) + } + return obj.(*v1alpha1.KusciaDeploymentSummary), nil +} diff --git a/pkg/crd/listers/kuscia/v1alpha1/kusciajobsummary.go b/pkg/crd/listers/kuscia/v1alpha1/kusciajobsummary.go new file mode 100644 index 00000000..79203605 --- /dev/null +++ b/pkg/crd/listers/kuscia/v1alpha1/kusciajobsummary.go @@ -0,0 +1,97 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// KusciaJobSummaryLister helps list KusciaJobSummaries. +// All objects returned here must be treated as read-only. +type KusciaJobSummaryLister interface { + // List lists all KusciaJobSummaries in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaJobSummary, err error) + // KusciaJobSummaries returns an object that can list and get KusciaJobSummaries. + KusciaJobSummaries(namespace string) KusciaJobSummaryNamespaceLister + KusciaJobSummaryListerExpansion +} + +// kusciaJobSummaryLister implements the KusciaJobSummaryLister interface. +type kusciaJobSummaryLister struct { + indexer cache.Indexer +} + +// NewKusciaJobSummaryLister returns a new KusciaJobSummaryLister. +func NewKusciaJobSummaryLister(indexer cache.Indexer) KusciaJobSummaryLister { + return &kusciaJobSummaryLister{indexer: indexer} +} + +// List lists all KusciaJobSummaries in the indexer. +func (s *kusciaJobSummaryLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaJobSummary, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaJobSummary)) + }) + return ret, err +} + +// KusciaJobSummaries returns an object that can list and get KusciaJobSummaries. +func (s *kusciaJobSummaryLister) KusciaJobSummaries(namespace string) KusciaJobSummaryNamespaceLister { + return kusciaJobSummaryNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KusciaJobSummaryNamespaceLister helps list and get KusciaJobSummaries. +// All objects returned here must be treated as read-only. 
+type KusciaJobSummaryNamespaceLister interface { + // List lists all KusciaJobSummaries in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaJobSummary, err error) + // Get retrieves the KusciaJobSummary from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.KusciaJobSummary, error) + KusciaJobSummaryNamespaceListerExpansion +} + +// kusciaJobSummaryNamespaceLister implements the KusciaJobSummaryNamespaceLister +// interface. +type kusciaJobSummaryNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KusciaJobSummaries in the indexer for a given namespace. +func (s kusciaJobSummaryNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaJobSummary, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaJobSummary)) + }) + return ret, err +} + +// Get retrieves the KusciaJobSummary from the indexer for a given namespace and name. +func (s kusciaJobSummaryNamespaceLister) Get(name string) (*v1alpha1.KusciaJobSummary, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("kusciajobsummary"), name) + } + return obj.(*v1alpha1.KusciaJobSummary), nil +} diff --git a/pkg/crd/listers/kuscia/v1alpha1/kusciatasksummary.go b/pkg/crd/listers/kuscia/v1alpha1/kusciatasksummary.go new file mode 100644 index 00000000..e94b9aef --- /dev/null +++ b/pkg/crd/listers/kuscia/v1alpha1/kusciatasksummary.go @@ -0,0 +1,97 @@ +// Copyright 2023 Ant Group Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/secretflow/kuscia/pkg/crd/apis/kuscia/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// KusciaTaskSummaryLister helps list KusciaTaskSummaries. +// All objects returned here must be treated as read-only. +type KusciaTaskSummaryLister interface { + // List lists all KusciaTaskSummaries in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaTaskSummary, err error) + // KusciaTaskSummaries returns an object that can list and get KusciaTaskSummaries. + KusciaTaskSummaries(namespace string) KusciaTaskSummaryNamespaceLister + KusciaTaskSummaryListerExpansion +} + +// kusciaTaskSummaryLister implements the KusciaTaskSummaryLister interface. +type kusciaTaskSummaryLister struct { + indexer cache.Indexer +} + +// NewKusciaTaskSummaryLister returns a new KusciaTaskSummaryLister. 
+func NewKusciaTaskSummaryLister(indexer cache.Indexer) KusciaTaskSummaryLister { + return &kusciaTaskSummaryLister{indexer: indexer} +} + +// List lists all KusciaTaskSummaries in the indexer. +func (s *kusciaTaskSummaryLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaTaskSummary, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaTaskSummary)) + }) + return ret, err +} + +// KusciaTaskSummaries returns an object that can list and get KusciaTaskSummaries. +func (s *kusciaTaskSummaryLister) KusciaTaskSummaries(namespace string) KusciaTaskSummaryNamespaceLister { + return kusciaTaskSummaryNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KusciaTaskSummaryNamespaceLister helps list and get KusciaTaskSummaries. +// All objects returned here must be treated as read-only. +type KusciaTaskSummaryNamespaceLister interface { + // List lists all KusciaTaskSummaries in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KusciaTaskSummary, err error) + // Get retrieves the KusciaTaskSummary from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.KusciaTaskSummary, error) + KusciaTaskSummaryNamespaceListerExpansion +} + +// kusciaTaskSummaryNamespaceLister implements the KusciaTaskSummaryNamespaceLister +// interface. +type kusciaTaskSummaryNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KusciaTaskSummaries in the indexer for a given namespace. +func (s kusciaTaskSummaryNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.KusciaTaskSummary, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KusciaTaskSummary)) + }) + return ret, err +} + +// Get retrieves the KusciaTaskSummary from the indexer for a given namespace and name. 
+func (s kusciaTaskSummaryNamespaceLister) Get(name string) (*v1alpha1.KusciaTaskSummary, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("kusciatasksummary"), name) + } + return obj.(*v1alpha1.KusciaTaskSummary), nil +} diff --git a/pkg/gateway/commands/root.go b/pkg/gateway/commands/root.go index 688223f2..a360cccb 100644 --- a/pkg/gateway/commands/root.go +++ b/pkg/gateway/commands/root.go @@ -20,7 +20,9 @@ import ( "path/filepath" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" informers "github.com/secretflow/kuscia/pkg/crd/informers/externalversions" "github.com/secretflow/kuscia/pkg/gateway/clusters" @@ -87,6 +89,7 @@ func Run(ctx context.Context, gwConfig *config.GatewayConfig, clients *kubeconfi } time.Sleep(defaultHandshakeRetryInterval) } + checkMasterProxyReady(ctx, gwConfig.DomainID, clients.KubeClient) } nlog.Infof("Add master clusters success") @@ -167,6 +170,19 @@ func Run(ctx context.Context, gwConfig *config.GatewayConfig, clients *kubeconfi return nil } +func checkMasterProxyReady(ctx context.Context, domainID string, kubeClient kubernetes.Interface) { + var err error + times := 5 + for i := 0; i < times; i++ { + if _, err = kubeClient.CoreV1().Pods(domainID).List(ctx, metav1.ListOptions{Limit: 1}); err == nil { + nlog.Info("Check MasterProxy ready") + return + } + time.Sleep(time.Second) + } + nlog.Fatalf("Check MasterProxy failed: %v", err.Error()) +} + func StartXds(gwConfig *config.GatewayConfig) error { // set route idle timeout xds.IdleTimeout = gwConfig.IdleTimeout diff --git a/pkg/gateway/controller/domain_route.go b/pkg/gateway/controller/domain_route.go index 88b2f790..1b8fd7a0 100644 --- a/pkg/gateway/controller/domain_route.go +++ b/pkg/gateway/controller/domain_route.go @@ -217,7 +217,7 @@ func (c *DomainRouteController) checkConnectionHealthy(ctx context.Context, stop for _, dr := range drs { if dr.Spec.AuthenticationType == kusciaapisv1alpha1.DomainAuthenticationToken && dr.Spec.Source == c.gateway.Namespace && dr.Status.TokenStatus.RevisionInitializer == c.gateway.Name && dr.Status.TokenStatus.RevisionToken.Token != "" { - nlog.Infof("checkConnectionHealthy of dr(%s)", dr.Name) + nlog.Debugf("checkConnectionHealthy of dr(%s)", dr.Name) resp, err := c.checkConnectionStatus(dr) if err != nil { nlog.Warn(err) @@ -362,7 +362,12 @@ func (c *DomainRouteController) updateDomainRoute(dr *kusciaapisv1alpha1.DomainR tokens, err := c.parseToken(dr, key) // Swallow all errors to avoid requeuing if err != nil { - nlog.Error(err) + nlog.Warn(err) + return nil + } + + if len(tokens) == 0 { + nlog.Debugf("DomainRoute %s has no available token", key) return nil } diff --git a/pkg/gateway/controller/handshake.go b/pkg/gateway/controller/handshake.go index 9c92a9c1..a94dab54 100644 --- a/pkg/gateway/controller/handshake.go +++ b/pkg/gateway/controller/handshake.go @@ -118,7 +118,8 @@ func doHTTP(in interface{}, out interface{}, path, host string, headers map[stri time.Sleep(time.Second) continue } - if err := json.Unmarshal(body, out); err != nil { + + if err = json.Unmarshal(body, out); err != nil { nlog.Errorf("Json unmarshal failed, err:%s, body:%s", err.Error(), string(body)) time.Sleep(time.Second) continue @@ -257,9 +258,9 @@ func (c *DomainRouteController) handleGetResponse(out *getResponse, dr *kusciaap case TokenNotReady: return fmt.Errorf("%s 
destination token is not ready", dr.Name) case NoAuthentication: - if dr.Status.IsDestinationAuthrized { + if dr.Status.IsDestinationAuthorized { dr = dr.DeepCopy() - dr.Status.IsDestinationAuthrized = false + dr.Status.IsDestinationAuthorized = false dr.Status.IsDestinationUnreachable = false dr.Status.TokenStatus = kusciaapisv1alpha1.DomainRouteTokenStatus{} c.kusciaClient.KusciaV1alpha1().DomainRoutes(dr.Namespace).UpdateStatus(context.Background(), dr, metav1.UpdateOptions{}) @@ -405,7 +406,7 @@ func (c *DomainRouteController) sourceInitiateHandShake(dr *kusciaapisv1alpha1.D drLatest, _ := c.domainRouteLister.DomainRoutes(dr.Namespace).Get(dr.Name) drCopy := drLatest.DeepCopy() tn := metav1.Now() - drCopy.Status.IsDestinationAuthrized = true + drCopy.Status.IsDestinationAuthorized = true drCopy.Status.TokenStatus.RevisionToken.Token = tokenEncrypted drCopy.Status.TokenStatus.RevisionToken.Revision = int64(resp.Token.Revision) drCopy.Status.TokenStatus.RevisionToken.IsReady = false @@ -491,7 +492,6 @@ func (c *DomainRouteController) handShakeHandle(w http.ResponseWriter, r *http.R } else { if resp.Status.Code != 0 { nlog.Errorf("DestReplyHandshake for(%s) fail, detail-> %v", drName, resp.Status.Message) - http.Error(w, resp.Status.Message, http.StatusInternalServerError) } else { nlog.Infof("DomainRoute %s handle success", drName) } @@ -568,14 +568,18 @@ func (c *DomainRouteController) DestReplyHandshake(req *handshake.HandShakeReque return false } if needGenerateToken() { - respToken = make([]byte, tokenByteSize) - if _, err = rand.Read(respToken); err != nil { + respToken, err = generateRandomToken(tokenByteSize) + if err != nil { return buildFailedHandshakeReply(500, err) } } else { respToken, err = decryptToken(c.prikey, dstRevisionToken.Token, tokenByteSize) if err != nil { - return buildFailedHandshakeReply(500, fmt.Errorf("source %s %s handshake decryptToken failed, error:%s", req.DomainId, req.Type, err.Error())) + nlog.Warnf("source %s %s handshake decryptToken failed, error:%s", req.DomainId, handShakeTypeUID, err.Error()) + respToken, err = generateRandomToken(tokenByteSize) + if err != nil { + return buildFailedHandshakeReply(500, err) + } } } @@ -670,8 +674,10 @@ func (c *DomainRouteController) parseToken(dr *kusciaapisv1alpha1.DomainRoute, r } switch dr.Spec.TokenConfig.TokenGenMethod { - case kusciaapisv1alpha1.TokenGenMethodRSA, kusciaapisv1alpha1.TokenGenUIDRSA: - tokens, err = c.parseTokenRSA(dr) + case kusciaapisv1alpha1.TokenGenMethodRSA: + tokens, err = c.parseTokenRSA(dr, false) + case kusciaapisv1alpha1.TokenGenUIDRSA: + tokens, err = c.parseTokenRSA(dr, true) default: err = fmt.Errorf("DomainRoute %s unsupported token method: %s", routeKey, dr.Spec.TokenConfig.TokenGenMethod) @@ -679,12 +685,12 @@ func (c *DomainRouteController) parseToken(dr *kusciaapisv1alpha1.DomainRoute, r return tokens, err } -func (c *DomainRouteController) parseTokenRSA(dr *kusciaapisv1alpha1.DomainRoute) ([]*Token, error) { +func (c *DomainRouteController) parseTokenRSA(dr *kusciaapisv1alpha1.DomainRoute, drop bool) ([]*Token, error) { key, _ := cache.MetaNamespaceKeyFunc(dr) var tokens []*Token if len(dr.Status.TokenStatus.Tokens) == 0 { - return tokens, fmt.Errorf("DomainRoute %s has no avaliable token", key) + return tokens, nil } if (c.gateway.Namespace == dr.Spec.Source && dr.Spec.TokenConfig.SourcePublicKey != c.gateway.Status.PublicKey) || @@ -696,7 +702,11 @@ func (c *DomainRouteController) parseTokenRSA(dr *kusciaapisv1alpha1.DomainRoute for _, token := range 
dr.Status.TokenStatus.Tokens { b, err := decryptToken(c.prikey, token.Token, tokenByteSize) if err != nil { - return []*Token{}, fmt.Errorf("DomainRoute %s decrypt token error: %v", key, err) + if !drop { + return []*Token{}, fmt.Errorf("DomainRoute %s decrypt token error: %v", key, err) + } + nlog.Warnf("DomainRoute %s decrypt token [revision -> %d] error: %v", key, token.Revision, err) + continue } tokens = append(tokens, &Token{Token: base64.StdEncoding.EncodeToString(b), Version: token.Revision}) } @@ -721,7 +731,14 @@ func (c *DomainRouteController) checkAndUpdateTokenInstances(dr *kusciaapisv1alp return err } return nil +} +func generateRandomToken(size int) ([]byte, error) { + respToken := make([]byte, size) + if _, err := rand.Read(respToken); err != nil { + return nil, err + } + return respToken, nil } func encryptToken(pub *rsa.PublicKey, key []byte) (string, error) { @@ -765,12 +782,12 @@ func HandshakeToMaster(domainID string, prikey *rsa.PrivateKey) error { return err } if resp.Status.Code != 0 { - nlog.Errorf("Handshake to master fail, return error:%v", resp.Status.Message) + nlog.Errorf("Handshake to master fail, return error:%v", resp.Status.Message) return errors.New(resp.Status.Message) } token, err := decryptToken(prikey, resp.Token.Token, tokenByteSize) if err != nil { - nlog.Errorf("handshake to master decryptToken err:%s", err.Error()) + nlog.Errorf("Handshake to master decryptToken err:%s", err.Error()) return err } c, err := xds.QueryCluster(clusters.GetMasterClusterName()) diff --git a/pkg/kusciaapi/config/kusciaapi_config.go b/pkg/kusciaapi/config/kusciaapi_config.go index 26bf61aa..290730d4 100644 --- a/pkg/kusciaapi/config/kusciaapi_config.go +++ b/pkg/kusciaapi/config/kusciaapi_config.go @@ -24,7 +24,6 @@ import ( "github.com/secretflow/kuscia/pkg/common" kusciaclientset "github.com/secretflow/kuscia/pkg/crd/clientset/versioned" - "github.com/secretflow/kuscia/pkg/kusciaapi/constants" "github.com/secretflow/kuscia/pkg/web/framework/config" ) @@ -65,12 +64,12 @@ func NewDefaultKusciaAPIConfig(rootDir string) *KusciaAPIConfig { WriteTimeout: 0, // WriteTimeout must be 0 , To support http stream IdleTimeout: 300, TLS: &config.TLSServerConfig{ - ServerKeyFile: path.Join(rootDir, constants.CertPathPrefix, "kusciaapi-server.key"), - ServerCertFile: path.Join(rootDir, constants.CertPathPrefix, "kusciaapi-server.crt"), + ServerKeyFile: path.Join(rootDir, common.CertPrefix, "kusciaapi-server.key"), + ServerCertFile: path.Join(rootDir, common.CertPrefix, "kusciaapi-server.crt"), }, Token: &TokenConfig{ - TokenFile: path.Join(rootDir, constants.CertPathPrefix, "token"), + TokenFile: path.Join(rootDir, common.CertPrefix, "token"), }, - ConfDir: path.Join(rootDir, constants.ConfPathPrefix), + ConfDir: path.Join(rootDir, common.ConfPrefix), } } diff --git a/pkg/kusciaapi/constants/constants.go b/pkg/kusciaapi/constants/constants.go index 4d68df60..d8b26421 100644 --- a/pkg/kusciaapi/constants/constants.go +++ b/pkg/kusciaapi/constants/constants.go @@ -14,9 +14,6 @@ package constants -const CertPathPrefix = "var/tmp" -const ConfPathPrefix = "etc/conf" - const RouteSucceeded = "Succeeded" const RouteFailed = "Failed" diff --git a/pkg/kusciaapi/service/job_service.go b/pkg/kusciaapi/service/job_service.go index 9b7ed8a5..0bc32f2e 100644 --- a/pkg/kusciaapi/service/job_service.go +++ b/pkg/kusciaapi/service/job_service.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "reflect" + "strings" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ 
-415,13 +416,43 @@ func (h *jobService) buildJobStatus(ctx context.Context, kusciaJob *v1alpha1.Kus ts.CreateTime = utils.TimeRfc3339String(&task.CreationTimestamp) ts.StartTime = utils.TimeRfc3339String(taskStatus.StartTime) ts.EndTime = utils.TimeRfc3339String(taskStatus.CompletionTime) - podStatuses := taskStatus.PodStatuses + partyTaskStatus := make(map[string]string) + for _, ps := range taskStatus.PartyTaskStatus { + partyTaskStatus[ps.DomainID] = string(ps.Phase) + } + + partyErrMsg := make(map[string][]string) + for _, podStatus := range taskStatus.PodStatuses { + msg := "" + if podStatus.Message != "" { + msg = fmt.Sprintf("%v;", podStatus.Message) + } + if podStatus.TerminationLog != "" { + msg += podStatus.TerminationLog + } + partyErrMsg[podStatus.Namespace] = append(partyErrMsg[podStatus.Namespace], msg) + } + + partyEndpoints := make(map[string][]*kusciaapi.JobPartyEndpoint) + for _, svcStatus := range taskStatus.ServiceStatuses { + ep := fmt.Sprintf("%v.%v.svc", svcStatus.ServiceName, svcStatus.Namespace) + if svcStatus.Scope == v1alpha1.ScopeDomain { + ep = fmt.Sprintf("%v:%v", ep, svcStatus.PortNumber) + } + partyEndpoints[svcStatus.Namespace] = append(partyEndpoints[svcStatus.Namespace], &kusciaapi.JobPartyEndpoint{ + PortName: svcStatus.PortName, + Scope: string(svcStatus.Scope), + Endpoint: ep, + }) + } + ts.Parties = make([]*kusciaapi.PartyStatus, 0) - for _, podStatus := range podStatuses { + for partyID, _ := range partyErrMsg { ts.Parties = append(ts.Parties, &kusciaapi.PartyStatus{ - DomainId: podStatus.Namespace, - State: string(podStatus.PodPhase), - ErrMsg: podStatus.TerminationLog, + DomainId: partyID, + State: partyTaskStatus[partyID], + ErrMsg: strings.Join(partyErrMsg[partyID], ","), + Endpoints: partyEndpoints[partyID], }) } } diff --git a/pkg/kusciaapi/service/serving_service.go b/pkg/kusciaapi/service/serving_service.go index a93ea9df..fbb97e13 100644 --- a/pkg/kusciaapi/service/serving_service.go +++ b/pkg/kusciaapi/service/serving_service.go @@ -467,15 +467,33 @@ func (s *servingService) buildServingStatusDetail(ctx context.Context, kd *v1alp for domainID, partyDeploymentStatus := range kd.Status.PartyDeploymentStatuses { for deploymentName, statusInfo := range partyDeploymentStatus { services, err := s.kubeClient.CoreV1().Services(domainID).List(ctx, metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet(labels.Set{common.LabelKubernetesDeploymentName: deploymentName, common.LabelPortScope: string(v1alpha1.ScopeCluster)}).String(), + LabelSelector: labels.SelectorFromSet(labels.Set{common.LabelKubernetesDeploymentName: deploymentName}).String(), }) if err != nil { return nil, err } - var endpoints []*kusciaapi.Endpoint + var endpoints []*kusciaapi.ServingPartyEndpoint for _, svc := range services.Items { - endpoints = append(endpoints, &kusciaapi.Endpoint{Endpoint: fmt.Sprintf("%v.%v.svc", svc.Name, svc.Namespace)}) + scope := svc.Labels[common.LabelPortScope] + for _, port := range svc.Spec.Ports { + switch scope { + case string(v1alpha1.ScopeDomain): + endpoints = append(endpoints, &kusciaapi.ServingPartyEndpoint{ + PortName: port.Name, + Scope: scope, + Endpoint: fmt.Sprintf("%v.%v.svc:%v", svc.Name, svc.Namespace, port.Port), + }) + case string(v1alpha1.ScopeCluster): + endpoints = append(endpoints, &kusciaapi.ServingPartyEndpoint{ + PortName: port.Name, + Scope: scope, + Endpoint: fmt.Sprintf("%v.%v.svc", svc.Name, svc.Namespace), + }) + default: + nlog.Warnf("Invalid party %v service %v port scope %v", domainID, svc.Name, scope) + } + } } 
partyStatuses = append(partyStatuses, &kusciaapi.PartyServingStatus{ diff --git a/pkg/utils/queue/queue.go b/pkg/utils/queue/queue.go index 3e8807f8..d12d0376 100644 --- a/pkg/utils/queue/queue.go +++ b/pkg/utils/queue/queue.go @@ -166,7 +166,7 @@ func HandleQueueItemWithAlwaysRetry(ctx context.Context, queueID string, q workq // Run the handler, passing it the namespace/name string of the Pod resource to be synced. if err := handler(ctx, key); err != nil { // Put the item back on the work queue to handle any transient errors. - nlog.Warnf("Error syncing: queue id[%v], key[%v]: %q, re-queuing (%v)", queueID, key, err.Error(), time.Since(startTime)) + nlog.Warnf("Processing item failed: queue id[%v], key[%v]: %q, re-queuing (%v)", queueID, key, err.Error(), time.Since(startTime)) q.AddRateLimited(key) return } diff --git a/pkg/utils/tls/crypt.go b/pkg/utils/tls/crypt.go index c7308a14..b638e7c2 100644 --- a/pkg/utils/tls/crypt.go +++ b/pkg/utils/tls/crypt.go @@ -146,7 +146,7 @@ func DecryptPKCS1v15(priv *rsa.PrivateKey, ciphertext string, keysize int, prefi i := 0 for ; i < len(prefix); i++ { if key[i] != prefix[i] { - return nil, fmt.Errorf("decrypt error") + return nil, fmt.Errorf("decrypt error, prefix not match") } } return key[len(prefix):], nil @@ -283,15 +283,15 @@ func ParseCert(certData []byte, certFile string) (cert *x509.Certificate, err er return nil, fmt.Errorf("can't parse cert") } -func ParseCertWithGenerated(privateKey *rsa.PrivateKey, domainID string, certData []byte, certFile string) (cert *x509.Certificate, err error) { +func ParseCertWithGenerated(privateKey *rsa.PrivateKey, subject string, certData []byte, certFile string) (cert *x509.Certificate, err error) { if len(certData) != 0 || (certFile != "" && paths.CheckFileExist(certFile)) { return ParseCert(certData, certFile) } - nlog.Infof("Generate cert with key") + nlog.Infof("Generate cert with key, subject[%s]", subject) template := &x509.Certificate{ SerialNumber: big.NewInt(1), - Subject: pkix.Name{CommonName: domainID}, + Subject: pkix.Name{CommonName: subject}, PublicKeyAlgorithm: x509.RSA, SignatureAlgorithm: x509.SHA256WithRSA, NotBefore: time.Now(), diff --git a/pkg/web/framework/config/tls_server_config.go b/pkg/web/framework/config/tls_server_config.go index f0715dab..38a45d06 100644 --- a/pkg/web/framework/config/tls_server_config.go +++ b/pkg/web/framework/config/tls_server_config.go @@ -32,10 +32,10 @@ type TLSServerConfig struct { CommonName string `yaml:"-"` } -func (t *TLSServerConfig) LoadFromDataOrFile() error { +func (t *TLSServerConfig) LoadFromDataOrFile(ipList, dnsList []string) error { var err error if t.ServerKeyData == "" && t.ServerKeyFile != "" && !paths.CheckFileExist(t.ServerKeyFile) { - if err = t.GenerateServerKeyCerts(t.CommonName, nil, nil); err != nil { + if err = t.GenerateServerKeyCerts(t.CommonName, ipList, dnsList); err != nil { return err } } else { diff --git a/proto/api/v1alpha1/kusciaapi/job.pb.go b/proto/api/v1alpha1/kusciaapi/job.pb.go index b9c5b82c..0e807ded 100644 --- a/proto/api/v1alpha1/kusciaapi/job.pb.go +++ b/proto/api/v1alpha1/kusciaapi/job.pb.go @@ -1258,9 +1258,10 @@ type PartyStatus struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DomainId string `protobuf:"bytes,1,opt,name=domain_id,json=domainId,proto3" json:"domain_id,omitempty"` - State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` - ErrMsg string `protobuf:"bytes,3,opt,name=err_msg,json=errMsg,proto3" json:"err_msg,omitempty"` + DomainId string 
`protobuf:"bytes,1,opt,name=domain_id,json=domainId,proto3" json:"domain_id,omitempty"` + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + ErrMsg string `protobuf:"bytes,3,opt,name=err_msg,json=errMsg,proto3" json:"err_msg,omitempty"` + Endpoints []*JobPartyEndpoint `protobuf:"bytes,4,rep,name=endpoints,proto3" json:"endpoints,omitempty"` } func (x *PartyStatus) Reset() { @@ -1316,6 +1317,13 @@ func (x *PartyStatus) GetErrMsg() string { return "" } +func (x *PartyStatus) GetEndpoints() []*JobPartyEndpoint { + if x != nil { + return x.Endpoints + } + return nil +} + type BatchQueryJobStatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1748,6 +1756,72 @@ func (x *WatchJobEventResponse) GetObject() *JobStatus { return nil } +type JobPartyEndpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // service port name which defined in AppImage container port. + PortName string `protobuf:"bytes,1,opt,name=port_name,json=portName,proto3" json:"port_name,omitempty"` + // service scope which defined in AppImage container port. + Scope string `protobuf:"bytes,2,opt,name=scope,proto3" json:"scope,omitempty"` + // service access address. + Endpoint string `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"` +} + +func (x *JobPartyEndpoint) Reset() { + *x = JobPartyEndpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JobPartyEndpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JobPartyEndpoint) ProtoMessage() {} + +func (x *JobPartyEndpoint) ProtoReflect() protoreflect.Message { + mi := &file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JobPartyEndpoint.ProtoReflect.Descriptor instead. 
+func (*JobPartyEndpoint) Descriptor() ([]byte, []int) { + return file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_rawDescGZIP(), []int{26} +} + +func (x *JobPartyEndpoint) GetPortName() string { + if x != nil { + return x.PortName + } + return "" +} + +func (x *JobPartyEndpoint) GetScope() string { + if x != nil { + return x.Scope + } + return "" +} + +func (x *JobPartyEndpoint) GetEndpoint() string { + if x != nil { + return x.Endpoint + } + return "" +} + var File_kuscia_proto_api_v1alpha1_kusciaapi_job_proto protoreflect.FileDescriptor var file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_rawDesc = []byte{ @@ -1927,145 +2001,157 @@ var file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_rawDesc = []byte{ 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, - 0x59, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, - 0x0a, 0x09, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x4d, 0x73, 0x67, 0x22, 0x77, 0x0a, 0x1a, 0x42, 0x61, - 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x6a, 0x6f, - 0x62, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6a, 0x6f, 0x62, - 0x49, 0x64, 0x73, 0x22, 0xb2, 0x01, 0x0a, 0x1b, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x58, - 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x6b, - 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, - 0x70, 0x69, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, 0x0a, 0x1f, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 
0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x42, 0x0a, 0x04, 0x6a, - 0x6f, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6b, 0x75, 0x73, 0x63, - 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, - 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x22, - 0x9e, 0x01, 0x0a, 0x11, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x4e, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, + 0xae, 0x01, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x53, 0x0a, 0x09, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, - 0x61, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x22, 0x7c, 0x0a, 0x15, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, - 0x12, 0x4c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x34, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, - 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x70, - 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x6a, - 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, - 0x49, 0x64, 0x12, 0x4c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x61, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x62, 0x50, 0x61, 0x72, 0x74, 0x79, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, + 0x22, 0x77, 0x0a, 
0x1a, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, + 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, + 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x17, 0x0a, 0x07, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x06, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x73, 0x22, 0xb2, 0x01, 0x0a, 0x1b, 0x42, 0x61, + 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6b, 0x75, 0x73, 0x63, + 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x58, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, - 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0x7c, 0x0a, 0x0f, 0x57, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0xa3, - 0x01, 0x0a, 0x15, 0x57, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, 0x62, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, + 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, + 0x0a, 0x1f, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x42, 0x0a, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 
0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, + 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x22, 0x9e, 0x01, 0x0a, 0x11, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6b, 0x75, + 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x4e, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x7c, 0x0a, 0x15, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x4c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x46, 0x0a, 0x06, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6b, + 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x62, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x22, 0x70, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x4c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x4a, + 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x7c, 0x0a, 0x0f, 0x57, 0x61, 0x74, 0x63, 0x68, 0x4a, + 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6b, 0x75, 0x73, 0x63, + 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 
0x6e, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x22, 0xa3, 0x01, 0x0a, 0x15, 0x57, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, + 0x62, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, - 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x2a, 0x40, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x10, 0x00, 0x12, 0x0b, - 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x53, - 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x10, 0x03, 0x2a, 0x4b, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x44, 0x44, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, - 0x0a, 0x08, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, 0x41, 0x52, 0x54, 0x42, 0x45, 0x41, - 0x54, 0x10, 0x04, 0x32, 0x8e, 0x06, 0x0a, 0x0a, 0x4a, 0x6f, 0x62, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x7a, 0x0a, 0x09, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x12, - 0x35, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x46, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, + 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x61, 0x0a, 0x10, 0x4a, 0x6f, + 0x62, 0x50, 0x61, 0x72, 0x74, 0x79, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2a, 0x40, 0x0a, + 0x09, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x65, + 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, + 0x64, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x03, 0x2a, + 0x4b, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 
+ 0x41, 0x44, 0x44, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x4f, 0x44, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, + 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x45, 0x41, 0x52, 0x54, 0x42, 0x45, 0x41, 0x54, 0x10, 0x04, 0x32, 0x8e, 0x06, 0x0a, + 0x0a, 0x4a, 0x6f, 0x62, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7a, 0x0a, 0x09, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x35, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x36, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x08, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x4a, 0x6f, 0x62, 0x12, 0x34, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, + 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x6b, 0x75, 0x73, 0x63, + 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x98, 0x01, 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, + 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3f, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x6b, 0x75, 0x73, 0x63, + 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, 0x07, 0x53, + 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x12, 0x33, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, - 0x0a, 0x08, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x12, 0x34, 0x2e, 0x6b, 0x75, 0x73, - 0x63, 0x69, 0x61, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, - 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x35, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, - 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x3f, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, - 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x40, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, - 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x74, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x12, 0x33, 0x2e, - 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, - 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, - 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, 0x09, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x35, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, + 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, + 0x70, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x6b, 0x75, + 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, + 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x7a, 0x0a, 0x09, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x35, + 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, + 0x61, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 
0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x6b, - 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, + 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, 0x62, 0x12, 0x34, 0x2e, 0x6b, 0x75, 0x73, 0x63, + 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, + 0x57, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x3a, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, + 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, 0x62, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x5e, 0x0a, + 0x21, 0x6f, 0x72, 0x67, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, - 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, 0x62, - 0x12, 0x34, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, - 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, 0x62, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x57, 0x61, 0x74, - 0x63, 0x68, 0x4a, 0x6f, 0x62, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x30, 0x01, 0x42, 0x5e, 0x0a, 0x21, 0x6f, 0x72, 0x67, 0x2e, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x66, 0x6c, 0x6f, 0x77, - 0x2f, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x6b, 0x75, 0x73, 0x63, 0x69, - 0x61, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x69, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2f, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2081,7 +2167,7 @@ func file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_rawDescGZIP() []byte { } var file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_msgTypes = 
make([]protoimpl.MessageInfo, 26) +var file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_msgTypes = make([]protoimpl.MessageInfo, 27) var file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_goTypes = []interface{}{ (TaskState)(0), // 0: kuscia.proto.api.v1alpha1.kusciaapi.TaskState (EventType)(0), // 1: kuscia.proto.api.v1alpha1.kusciaapi.EventType @@ -2111,57 +2197,59 @@ var file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_goTypes = []interface{}{ (*JobStatus)(nil), // 25: kuscia.proto.api.v1alpha1.kusciaapi.JobStatus (*WatchJobRequest)(nil), // 26: kuscia.proto.api.v1alpha1.kusciaapi.WatchJobRequest (*WatchJobEventResponse)(nil), // 27: kuscia.proto.api.v1alpha1.kusciaapi.WatchJobEventResponse - (*v1alpha1.RequestHeader)(nil), // 28: kuscia.proto.api.v1alpha1.RequestHeader - (*v1alpha1.Status)(nil), // 29: kuscia.proto.api.v1alpha1.Status + (*JobPartyEndpoint)(nil), // 28: kuscia.proto.api.v1alpha1.kusciaapi.JobPartyEndpoint + (*v1alpha1.RequestHeader)(nil), // 29: kuscia.proto.api.v1alpha1.RequestHeader + (*v1alpha1.Status)(nil), // 30: kuscia.proto.api.v1alpha1.Status } var file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_depIdxs = []int32{ - 28, // 0: kuscia.proto.api.v1alpha1.kusciaapi.CreateJobRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader + 29, // 0: kuscia.proto.api.v1alpha1.kusciaapi.CreateJobRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader 5, // 1: kuscia.proto.api.v1alpha1.kusciaapi.CreateJobRequest.tasks:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.Task - 29, // 2: kuscia.proto.api.v1alpha1.kusciaapi.CreateJobResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status + 30, // 2: kuscia.proto.api.v1alpha1.kusciaapi.CreateJobResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status 4, // 3: kuscia.proto.api.v1alpha1.kusciaapi.CreateJobResponse.data:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.CreateJobResponseData 6, // 4: kuscia.proto.api.v1alpha1.kusciaapi.Task.parties:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.Party - 28, // 5: kuscia.proto.api.v1alpha1.kusciaapi.DeleteJobRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader - 29, // 6: kuscia.proto.api.v1alpha1.kusciaapi.DeleteJobResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status + 29, // 5: kuscia.proto.api.v1alpha1.kusciaapi.DeleteJobRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader + 30, // 6: kuscia.proto.api.v1alpha1.kusciaapi.DeleteJobResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status 9, // 7: kuscia.proto.api.v1alpha1.kusciaapi.DeleteJobResponse.data:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.DeleteJobResponseData - 28, // 8: kuscia.proto.api.v1alpha1.kusciaapi.StopJobRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader - 29, // 9: kuscia.proto.api.v1alpha1.kusciaapi.StopJobResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status + 29, // 8: kuscia.proto.api.v1alpha1.kusciaapi.StopJobRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader + 30, // 9: kuscia.proto.api.v1alpha1.kusciaapi.StopJobResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status 12, // 10: kuscia.proto.api.v1alpha1.kusciaapi.StopJobResponse.data:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.StopJobResponseData - 28, // 11: kuscia.proto.api.v1alpha1.kusciaapi.QueryJobRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader - 29, // 12: kuscia.proto.api.v1alpha1.kusciaapi.QueryJobResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status + 29, // 11: 
kuscia.proto.api.v1alpha1.kusciaapi.QueryJobRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader + 30, // 12: kuscia.proto.api.v1alpha1.kusciaapi.QueryJobResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status 15, // 13: kuscia.proto.api.v1alpha1.kusciaapi.QueryJobResponse.data:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.QueryJobResponseData 17, // 14: kuscia.proto.api.v1alpha1.kusciaapi.QueryJobResponseData.tasks:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.TaskConfig 16, // 15: kuscia.proto.api.v1alpha1.kusciaapi.QueryJobResponseData.status:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatusDetail 18, // 16: kuscia.proto.api.v1alpha1.kusciaapi.JobStatusDetail.tasks:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.TaskStatus 6, // 17: kuscia.proto.api.v1alpha1.kusciaapi.TaskConfig.parties:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.Party 19, // 18: kuscia.proto.api.v1alpha1.kusciaapi.TaskStatus.parties:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.PartyStatus - 28, // 19: kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader - 29, // 20: kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status - 22, // 21: kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusResponse.data:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusResponseData - 25, // 22: kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusResponseData.jobs:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatus - 29, // 23: kuscia.proto.api.v1alpha1.kusciaapi.JobStatusResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status - 24, // 24: kuscia.proto.api.v1alpha1.kusciaapi.JobStatusResponse.data:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatusResponseData - 16, // 25: kuscia.proto.api.v1alpha1.kusciaapi.JobStatusResponseData.status:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatusDetail - 16, // 26: kuscia.proto.api.v1alpha1.kusciaapi.JobStatus.status:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatusDetail - 28, // 27: kuscia.proto.api.v1alpha1.kusciaapi.WatchJobRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader - 1, // 28: kuscia.proto.api.v1alpha1.kusciaapi.WatchJobEventResponse.type:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.EventType - 25, // 29: kuscia.proto.api.v1alpha1.kusciaapi.WatchJobEventResponse.object:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatus - 2, // 30: kuscia.proto.api.v1alpha1.kusciaapi.JobService.CreateJob:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.CreateJobRequest - 13, // 31: kuscia.proto.api.v1alpha1.kusciaapi.JobService.QueryJob:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.QueryJobRequest - 20, // 32: kuscia.proto.api.v1alpha1.kusciaapi.JobService.BatchQueryJobStatus:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusRequest - 10, // 33: kuscia.proto.api.v1alpha1.kusciaapi.JobService.StopJob:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.StopJobRequest - 7, // 34: kuscia.proto.api.v1alpha1.kusciaapi.JobService.DeleteJob:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.DeleteJobRequest - 26, // 35: kuscia.proto.api.v1alpha1.kusciaapi.JobService.WatchJob:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.WatchJobRequest - 3, // 36: kuscia.proto.api.v1alpha1.kusciaapi.JobService.CreateJob:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.CreateJobResponse - 14, // 37: 
kuscia.proto.api.v1alpha1.kusciaapi.JobService.QueryJob:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.QueryJobResponse - 21, // 38: kuscia.proto.api.v1alpha1.kusciaapi.JobService.BatchQueryJobStatus:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusResponse - 11, // 39: kuscia.proto.api.v1alpha1.kusciaapi.JobService.StopJob:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.StopJobResponse - 8, // 40: kuscia.proto.api.v1alpha1.kusciaapi.JobService.DeleteJob:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.DeleteJobResponse - 27, // 41: kuscia.proto.api.v1alpha1.kusciaapi.JobService.WatchJob:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.WatchJobEventResponse - 36, // [36:42] is the sub-list for method output_type - 30, // [30:36] is the sub-list for method input_type - 30, // [30:30] is the sub-list for extension type_name - 30, // [30:30] is the sub-list for extension extendee - 0, // [0:30] is the sub-list for field type_name + 28, // 19: kuscia.proto.api.v1alpha1.kusciaapi.PartyStatus.endpoints:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobPartyEndpoint + 29, // 20: kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader + 30, // 21: kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status + 22, // 22: kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusResponse.data:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusResponseData + 25, // 23: kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusResponseData.jobs:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatus + 30, // 24: kuscia.proto.api.v1alpha1.kusciaapi.JobStatusResponse.status:type_name -> kuscia.proto.api.v1alpha1.Status + 24, // 25: kuscia.proto.api.v1alpha1.kusciaapi.JobStatusResponse.data:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatusResponseData + 16, // 26: kuscia.proto.api.v1alpha1.kusciaapi.JobStatusResponseData.status:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatusDetail + 16, // 27: kuscia.proto.api.v1alpha1.kusciaapi.JobStatus.status:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatusDetail + 29, // 28: kuscia.proto.api.v1alpha1.kusciaapi.WatchJobRequest.header:type_name -> kuscia.proto.api.v1alpha1.RequestHeader + 1, // 29: kuscia.proto.api.v1alpha1.kusciaapi.WatchJobEventResponse.type:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.EventType + 25, // 30: kuscia.proto.api.v1alpha1.kusciaapi.WatchJobEventResponse.object:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.JobStatus + 2, // 31: kuscia.proto.api.v1alpha1.kusciaapi.JobService.CreateJob:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.CreateJobRequest + 13, // 32: kuscia.proto.api.v1alpha1.kusciaapi.JobService.QueryJob:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.QueryJobRequest + 20, // 33: kuscia.proto.api.v1alpha1.kusciaapi.JobService.BatchQueryJobStatus:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusRequest + 10, // 34: kuscia.proto.api.v1alpha1.kusciaapi.JobService.StopJob:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.StopJobRequest + 7, // 35: kuscia.proto.api.v1alpha1.kusciaapi.JobService.DeleteJob:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.DeleteJobRequest + 26, // 36: kuscia.proto.api.v1alpha1.kusciaapi.JobService.WatchJob:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.WatchJobRequest + 3, // 37: kuscia.proto.api.v1alpha1.kusciaapi.JobService.CreateJob:output_type -> 
kuscia.proto.api.v1alpha1.kusciaapi.CreateJobResponse + 14, // 38: kuscia.proto.api.v1alpha1.kusciaapi.JobService.QueryJob:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.QueryJobResponse + 21, // 39: kuscia.proto.api.v1alpha1.kusciaapi.JobService.BatchQueryJobStatus:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.BatchQueryJobStatusResponse + 11, // 40: kuscia.proto.api.v1alpha1.kusciaapi.JobService.StopJob:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.StopJobResponse + 8, // 41: kuscia.proto.api.v1alpha1.kusciaapi.JobService.DeleteJob:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.DeleteJobResponse + 27, // 42: kuscia.proto.api.v1alpha1.kusciaapi.JobService.WatchJob:output_type -> kuscia.proto.api.v1alpha1.kusciaapi.WatchJobEventResponse + 37, // [37:43] is the sub-list for method output_type + 31, // [31:37] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name } func init() { file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_init() } @@ -2482,6 +2570,18 @@ func file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_init() { return nil } } + file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JobPartyEndpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -2489,7 +2589,7 @@ func file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_kuscia_proto_api_v1alpha1_kusciaapi_job_proto_rawDesc, NumEnums: 2, - NumMessages: 26, + NumMessages: 27, NumExtensions: 0, NumServices: 1, }, diff --git a/proto/api/v1alpha1/kusciaapi/job.proto b/proto/api/v1alpha1/kusciaapi/job.proto index 7fc6182f..50586c65 100644 --- a/proto/api/v1alpha1/kusciaapi/job.proto +++ b/proto/api/v1alpha1/kusciaapi/job.proto @@ -146,6 +146,7 @@ message PartyStatus { string domain_id = 1; string state = 2; string err_msg = 3; + repeated JobPartyEndpoint endpoints = 4; } enum TaskState { @@ -201,3 +202,12 @@ enum EventType { ERROR = 3; HEARTBEAT = 4; } + +message JobPartyEndpoint { + // service port name which defined in AppImage container port. + string port_name = 1; + // service scope which defined in AppImage container port. + string scope = 2; + // service access address. 
+ string endpoint = 3; +} \ No newline at end of file diff --git a/proto/api/v1alpha1/kusciaapi/serving.pb.go b/proto/api/v1alpha1/kusciaapi/serving.pb.go index 2726f272..79f2e842 100644 --- a/proto/api/v1alpha1/kusciaapi/serving.pb.go +++ b/proto/api/v1alpha1/kusciaapi/serving.pb.go @@ -1111,16 +1111,15 @@ type PartyServingStatus struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DomainId string `protobuf:"bytes,1,opt,name=domain_id,json=domainId,proto3" json:"domain_id,omitempty"` - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` - Replicas int32 `protobuf:"varint,4,opt,name=replicas,proto3" json:"replicas,omitempty"` - AvailableReplicas int32 `protobuf:"varint,5,opt,name=available_replicas,json=availableReplicas,proto3" json:"available_replicas,omitempty"` - UnavailableReplicas int32 `protobuf:"varint,6,opt,name=unavailable_replicas,json=unavailableReplicas,proto3" json:"unavailable_replicas,omitempty"` - UpdatedReplicas int32 `protobuf:"varint,7,opt,name=updatedReplicas,proto3" json:"updatedReplicas,omitempty"` - CreateTime string `protobuf:"bytes,8,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // only contain cluster scope service - Endpoints []*Endpoint `protobuf:"bytes,9,rep,name=endpoints,proto3" json:"endpoints,omitempty"` + DomainId string `protobuf:"bytes,1,opt,name=domain_id,json=domainId,proto3" json:"domain_id,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` + Replicas int32 `protobuf:"varint,4,opt,name=replicas,proto3" json:"replicas,omitempty"` + AvailableReplicas int32 `protobuf:"varint,5,opt,name=available_replicas,json=availableReplicas,proto3" json:"available_replicas,omitempty"` + UnavailableReplicas int32 `protobuf:"varint,6,opt,name=unavailable_replicas,json=unavailableReplicas,proto3" json:"unavailable_replicas,omitempty"` + UpdatedReplicas int32 `protobuf:"varint,7,opt,name=updatedReplicas,proto3" json:"updatedReplicas,omitempty"` + CreateTime string `protobuf:"bytes,8,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + Endpoints []*ServingPartyEndpoint `protobuf:"bytes,9,rep,name=endpoints,proto3" json:"endpoints,omitempty"` } func (x *PartyServingStatus) Reset() { @@ -1211,23 +1210,28 @@ func (x *PartyServingStatus) GetCreateTime() string { return "" } -func (x *PartyServingStatus) GetEndpoints() []*Endpoint { +func (x *PartyServingStatus) GetEndpoints() []*ServingPartyEndpoint { if x != nil { return x.Endpoints } return nil } -type Endpoint struct { +type ServingPartyEndpoint struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + // service port name which defined in AppImage container port. + PortName string `protobuf:"bytes,1,opt,name=port_name,json=portName,proto3" json:"port_name,omitempty"` + // service scope which defined in AppImage container port. + Scope string `protobuf:"bytes,2,opt,name=scope,proto3" json:"scope,omitempty"` + // service access address. 
+ Endpoint string `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"` } -func (x *Endpoint) Reset() { - *x = Endpoint{} +func (x *ServingPartyEndpoint) Reset() { + *x = ServingPartyEndpoint{} if protoimpl.UnsafeEnabled { mi := &file_kuscia_proto_api_v1alpha1_kusciaapi_serving_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1235,13 +1239,13 @@ func (x *Endpoint) Reset() { } } -func (x *Endpoint) String() string { +func (x *ServingPartyEndpoint) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Endpoint) ProtoMessage() {} +func (*ServingPartyEndpoint) ProtoMessage() {} -func (x *Endpoint) ProtoReflect() protoreflect.Message { +func (x *ServingPartyEndpoint) ProtoReflect() protoreflect.Message { mi := &file_kuscia_proto_api_v1alpha1_kusciaapi_serving_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1253,12 +1257,26 @@ func (x *Endpoint) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead. -func (*Endpoint) Descriptor() ([]byte, []int) { +// Deprecated: Use ServingPartyEndpoint.ProtoReflect.Descriptor instead. +func (*ServingPartyEndpoint) Descriptor() ([]byte, []int) { return file_kuscia_proto_api_v1alpha1_kusciaapi_serving_proto_rawDescGZIP(), []int{18} } -func (x *Endpoint) GetEndpoint() string { +func (x *ServingPartyEndpoint) GetPortName() string { + if x != nil { + return x.PortName + } + return "" +} + +func (x *ServingPartyEndpoint) GetScope() string { + if x != nil { + return x.Scope + } + return "" +} + +func (x *ServingPartyEndpoint) GetEndpoint() string { if x != nil { return x.Endpoint } @@ -1458,7 +1476,7 @@ var file_kuscia_proto_api_v1alpha1_kusciaapi_serving_proto_rawDesc = []byte{ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x79, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0xf1, 0x02, 0x0a, 0x12, 0x50, 0x61, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0xfd, 0x02, 0x0a, 0x12, 0x50, 0x61, 0x72, 0x74, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, @@ -1477,66 +1495,71 @@ var file_kuscia_proto_api_v1alpha1_kusciaapi_serving_proto_rawDesc = []byte{ 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x4b, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x57, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, - 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x45, 0x6e, 0x64, 
0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0x26, 0x0a, 0x08, - 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x32, 0xd8, 0x05, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x86, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x39, 0x2e, 0x6b, 0x75, 0x73, 0x63, - 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, + 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, + 0x50, 0x61, 0x72, 0x74, 0x79, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x09, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0x65, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x74, 0x79, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x32, + 0xd8, 0x05, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x86, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x6e, 0x67, 0x12, 0x39, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x83, 0x01, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x12, 0x38, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, - 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x6b, 0x75, - 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, - 0x69, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x86, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x39, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 
0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x3a, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, + 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x0c, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x38, 0x2e, 0x6b, + 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, + 0x70, 0x69, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x86, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x6e, 0x67, 0x12, 0x39, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x86, 0x01, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x12, 0x39, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, - 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6b, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, + 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, + 0x61, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x86, 0x01, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x39, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xa4, 0x01, 0x0a, 0x17, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 
0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x43, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0xa4, 0x01, 0x0a, 0x17, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x43, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, + 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x44, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x44, 0x2e, 0x6b, 0x75, 0x73, 0x63, - 0x69, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x2e, - 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, - 0x5e, 0x0a, 0x21, 0x6f, 0x72, 0x67, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x66, 0x6c, 0x6f, - 0x77, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, - 0x61, 0x61, 0x70, 0x69, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x6b, 0x75, 0x73, 0x63, - 0x69, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x5e, 0x0a, 0x21, 0x6f, 0x72, + 0x67, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x5a, + 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2f, 0x6b, 0x75, 0x73, 0x63, 0x69, 0x61, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1571,7 +1594,7 @@ var file_kuscia_proto_api_v1alpha1_kusciaapi_serving_proto_goTypes = []interface (*ServingStatus)(nil), // 15: kuscia.proto.api.v1alpha1.kusciaapi.ServingStatus 
(*ServingStatusDetail)(nil), // 16: kuscia.proto.api.v1alpha1.kusciaapi.ServingStatusDetail (*PartyServingStatus)(nil), // 17: kuscia.proto.api.v1alpha1.kusciaapi.PartyServingStatus - (*Endpoint)(nil), // 18: kuscia.proto.api.v1alpha1.kusciaapi.Endpoint + (*ServingPartyEndpoint)(nil), // 18: kuscia.proto.api.v1alpha1.kusciaapi.ServingPartyEndpoint (*v1alpha1.RequestHeader)(nil), // 19: kuscia.proto.api.v1alpha1.RequestHeader (*v1alpha1.Status)(nil), // 20: kuscia.proto.api.v1alpha1.Status } @@ -1597,7 +1620,7 @@ var file_kuscia_proto_api_v1alpha1_kusciaapi_serving_proto_depIdxs = []int32{ 13, // 18: kuscia.proto.api.v1alpha1.kusciaapi.ServingParty.resources:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.Resource 16, // 19: kuscia.proto.api.v1alpha1.kusciaapi.ServingStatus.status:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.ServingStatusDetail 17, // 20: kuscia.proto.api.v1alpha1.kusciaapi.ServingStatusDetail.party_statuses:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.PartyServingStatus - 18, // 21: kuscia.proto.api.v1alpha1.kusciaapi.PartyServingStatus.endpoints:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.Endpoint + 18, // 21: kuscia.proto.api.v1alpha1.kusciaapi.PartyServingStatus.endpoints:type_name -> kuscia.proto.api.v1alpha1.kusciaapi.ServingPartyEndpoint 0, // 22: kuscia.proto.api.v1alpha1.kusciaapi.ServingService.CreateServing:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.CreateServingRequest 2, // 23: kuscia.proto.api.v1alpha1.kusciaapi.ServingService.QueryServing:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.QueryServingRequest 5, // 24: kuscia.proto.api.v1alpha1.kusciaapi.ServingService.UpdateServing:input_type -> kuscia.proto.api.v1alpha1.kusciaapi.UpdateServingRequest @@ -1838,7 +1861,7 @@ func file_kuscia_proto_api_v1alpha1_kusciaapi_serving_proto_init() { } } file_kuscia_proto_api_v1alpha1_kusciaapi_serving_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Endpoint); i { + switch v := v.(*ServingPartyEndpoint); i { case 0: return &v.state case 1: diff --git a/proto/api/v1alpha1/kusciaapi/serving.proto b/proto/api/v1alpha1/kusciaapi/serving.proto index 24295088..2107b63f 100644 --- a/proto/api/v1alpha1/kusciaapi/serving.proto +++ b/proto/api/v1alpha1/kusciaapi/serving.proto @@ -151,10 +151,14 @@ message PartyServingStatus { int32 unavailable_replicas = 6; int32 updatedReplicas = 7; string create_time = 8; - // only contain cluster scope service - repeated Endpoint endpoints = 9; + repeated ServingPartyEndpoint endpoints = 9; } -message Endpoint { - string endpoint = 1; +message ServingPartyEndpoint { + // service port name which defined in AppImage container port. + string port_name = 1; + // service scope which defined in AppImage container port. + string scope = 2; + // service access address. + string endpoint = 3; } \ No newline at end of file diff --git a/scripts/deploy/add_domain.sh b/scripts/deploy/add_domain.sh index e935b0ad..2870ee3c 100755 --- a/scripts/deploy/add_domain.sh +++ b/scripts/deploy/add_domain.sh @@ -40,7 +40,7 @@ if [[ $SELF_DOMAIN_ID == "" ]]; then fi ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd -P) -CERT=$(base64 $ROOT/var/tmp/${DOMAIN_ID}.domain.crt | tr -d "\n") +CERT=$(base64 $ROOT/var/certs/${DOMAIN_ID}.domain.crt | tr -d "\n") DOMAIN_TEMPLATE=" apiVersion: kuscia.secretflow/v1alpha1 diff --git a/scripts/deploy/deploy.sh b/scripts/deploy/deploy.sh index 63809f95..01042f39 100755 --- a/scripts/deploy/deploy.sh +++ b/scripts/deploy/deploy.sh @@ -53,8 +53,7 @@ log "SECRETFLOW_IMAGE=${SECRETFLOW_IMAGE}" SF_IMAGE_REGISTRY="secretflow-registry.cn-hangzhou.cr.aliyuncs.com/secretflow" CTR_ROOT=/home/kuscia -CTR_TMP_ROOT=${CTR_ROOT}/var/tmp -CTR_CERT_ROOT=${CTR_ROOT}/var/tmp +CTR_CERT_ROOT=${CTR_ROOT}/var/certs MASTER_MEMORY_LIMIT=2G LITE_MEMORY_LIMIT=4G AUTONOMY_MEMORY_LIMIT=6G @@ -126,7 +125,7 @@ function do_https_probe() { local retry=0 while [ $retry -lt "$max_retry" ]; do local status_code - status_code=$(docker exec -it $ctr curl -k --write-out '%{http_code}' --silent --output /dev/null "${endpoint}" --cacert ${CTR_TMP_ROOT}/ca.crt --cert ${CTR_TMP_ROOT}/ca.crt --key ${CTR_TMP_ROOT}/ca.key ) + status_code=$(docker exec -it $ctr curl -k --write-out '%{http_code}' --silent --output /dev/null "${endpoint}" --cacert ${CTR_CERT_ROOT}/ca.crt --cert ${CTR_CERT_ROOT}/ca.crt --key ${CTR_CERT_ROOT}/ca.key ) if [[ $status_code -eq 200 || $status_code -eq 404 || $status_code -eq 401 ]]; then return 0 fi diff --git a/scripts/deploy/generate_rsa_key.sh b/scripts/deploy/generate_rsa_key.sh new file mode 100755 index 00000000..c35c219b --- /dev/null +++ b/scripts/deploy/generate_rsa_key.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e +GREEN='\033[0;32m' +NC='\033[0m' +KEY=$(openssl genrsa 2048 2>/dev/null | base64 | tr -d "\n" && echo) +echo -e "${GREEN}Generated domain private key configuration:\n\n$KEY\n${NC}" \ No newline at end of file diff --git a/scripts/deploy/init_kusciaapi_client_certs.sh b/scripts/deploy/init_kusciaapi_client_certs.sh index 29c62134..cceeed7c 100755 --- a/scripts/deploy/init_kusciaapi_client_certs.sh +++ b/scripts/deploy/init_kusciaapi_client_certs.sh @@ -17,7 +17,7 @@ set -e ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd -P) -pushd ${ROOT}/var/tmp >/dev/null || exit +pushd ${ROOT}/var/certs >/dev/null || exit CLIENT=kusciaapi-client diff --git a/scripts/deploy/start_secretpad.sh b/scripts/deploy/start_secretpad.sh index 2e7a6a0f..2e07fda7 100644 --- a/scripts/deploy/start_secretpad.sh +++ b/scripts/deploy/start_secretpad.sh @@ -23,7 +23,7 @@ NC='\033[0m' ROOT=$(pwd) CTR_ROOT=/home/kuscia -CTR_CERT_ROOT=${CTR_ROOT}/var/tmp +CTR_CERT_ROOT=${CTR_ROOT}/var/certs CTR_PREFIX=${USER}-kuscia MASTER_CTR=${CTR_PREFIX}-master FORCE_START=false diff --git a/scripts/deploy/start_standalone.sh b/scripts/deploy/start_standalone.sh index aa8756d9..63267786 100755 --- a/scripts/deploy/start_standalone.sh +++ b/scripts/deploy/start_standalone.sh @@ -42,8 +42,7 @@ fi CTR_PREFIX=${USER}-kuscia CTR_ROOT=/home/kuscia -CTR_CERT_ROOT=${CTR_ROOT}/var/tmp -CTR_TMP_ROOT=${CTR_ROOT}/var/tmp +CTR_CERT_ROOT=${CTR_ROOT}/var/certs MASTER_DOMAIN="kuscia-system" ALICE_DOMAIN="alice" BOB_DOMAIN="bob" @@ -281,10 +280,10 @@ function copy_kuscia_api_client_certs() { # copy result tmp_path=${volume_path}/temp/certs mkdir -p ${tmp_path} - docker cp ${MASTER_CTR}:/${CTR_TMP_ROOT}/ca.crt ${tmp_path}/ca.crt - docker cp ${MASTER_CTR}:/${CTR_TMP_ROOT}/kusciaapi-client.crt ${tmp_path}/client.crt - docker cp ${MASTER_CTR}:/${CTR_TMP_ROOT}/kusciaapi-client.key ${tmp_path}/client.pem - docker cp ${MASTER_CTR}:/${CTR_TMP_ROOT}/token ${tmp_path}/token + docker cp ${MASTER_CTR}:/${CTR_CERT_ROOT}/ca.crt ${tmp_path}/ca.crt + docker cp ${MASTER_CTR}:/${CTR_CERT_ROOT}/kusciaapi-client.crt ${tmp_path}/client.crt + docker cp ${MASTER_CTR}:/${CTR_CERT_ROOT}/kusciaapi-client.key ${tmp_path}/client.pem + docker cp ${MASTER_CTR}:/${CTR_CERT_ROOT}/token ${tmp_path}/token docker run -d --rm --name ${CTR_PREFIX}-dummy --volume=${volume_path}/secretpad/config:/tmp/temp $IMAGE tail -f /dev/null >/dev/null 2>&1 docker cp -a ${tmp_path} ${CTR_PREFIX}-dummy:/tmp/temp/ docker rm -f ${CTR_PREFIX}-dummy >/dev/null 2>&1 @@ -347,7 +346,8 @@ function create_domaindatagrant_alice2bob() { local ctr=$1 probe_datamesh $ctr docker exec -it ${ctr} curl https://127.0.0.1:8070/api/v1/datamesh/domaindatagrant/create -X POST -H 'content-type: application/json' -d '{"author":"alice","domaindata_id":"alice-table","grant_domain":"bob"}' \ - --cacert ${CTR_TMP_ROOT}/ca.crt --cert ${CTR_TMP_ROOT}/ca.crt --key ${CTR_TMP_ROOT}/ca.key + --cacert ${CTR_CERT_ROOT}/ca.crt --cert ${CTR_CERT_ROOT}/ca.crt --key ${CTR_CERT_ROOT}/ca.key + echo } function create_domaindata_alice_table() { @@ -364,7 +364,8 @@ function create_domaindatagrant_bob2alice() { local ctr=$1 probe_datamesh $ctr docker exec -it ${ctr} curl https://127.0.0.1:8070/api/v1/datamesh/domaindatagrant/create -X POST -H 'content-type: application/json' -d '{"author":"bob","domaindata_id":"bob-table","grant_domain":"alice"}' \ - --cacert ${CTR_TMP_ROOT}/ca.crt --cert ${CTR_TMP_ROOT}/ca.crt --key ${CTR_TMP_ROOT}/ca.key + --cacert ${CTR_CERT_ROOT}/ca.crt --cert ${CTR_CERT_ROOT}/ca.crt --key ${CTR_CERT_ROOT}/ca.key + echo } function create_domaindata_bob_table() { @@ -384,7 +385,7 @@ function probe_datamesh() { local retry=0 while [ $retry -lt "$max_retry" ]; do local status_code - status_code=$(docker exec -it $ctr curl -k --write-out '%{http_code}' --silent --output /dev/null "${endpoint}" -d'{}' --cacert ${CTR_TMP_ROOT}/ca.crt --cert ${CTR_TMP_ROOT}/ca.crt --key ${CTR_TMP_ROOT}/ca.key || true) + status_code=$(docker exec -it $ctr curl -k --write-out '%{http_code}' --silent --output /dev/null "${endpoint}" -d'{}' --cacert 
${CTR_CERT_ROOT}/ca.crt --cert ${CTR_CERT_ROOT}/ca.crt --key ${CTR_CERT_ROOT}/ca.key || true) if [[ $status_code -eq 200 ]]; then log "Probe ${domain_ctr} datamesh successfully" return 0 @@ -733,7 +734,7 @@ function build_interconn() { local host_ctr=${CTR_PREFIX}-autonomy-${host_domain} log "Starting build internet connect from '${member_domain}' to '${host_domain}'" - copy_between_containers ${member_ctr}:${CTR_TMP_ROOT}/domain.crt ${host_ctr}:${CTR_CERT_ROOT}/${member_domain}.domain.crt + copy_between_containers ${member_ctr}:${CTR_CERT_ROOT}/domain.crt ${host_ctr}:${CTR_CERT_ROOT}/${member_domain}.domain.crt docker exec -it ${host_ctr} scripts/deploy/add_domain.sh $member_domain p2p ${interconn_protocol} docker exec -it ${member_ctr} scripts/deploy/join_to_host.sh $member_domain $host_domain https://${host_ctr}:1080 -p ${interconn_protocol} diff --git a/scripts/templates/app_image.ezpsi.yaml b/scripts/templates/app_image.ezpsi.yaml deleted file mode 100644 index e66deeb2..00000000 --- a/scripts/templates/app_image.ezpsi.yaml +++ /dev/null @@ -1,39 +0,0 @@ -apiVersion: kuscia.secretflow/v1alpha1 -kind: AppImage -metadata: - name: ezpsi-image -spec: - configTemplates: - task-config.conf: | - { - "task_id": "{{.TASK_ID}}", - "task_input_config": "{{.TASK_INPUT_CONFIG}}", - "task_cluster_def": "{{.TASK_CLUSTER_DEFINE}}", - "allocated_ports": "{{.ALLOCATED_PORTS}}" - } - deployTemplates: - - name: ezpsi - replicas: 1 - spec: - containers: - - args: - - -c - - /root/main --kuscia /etc/kuscia/task-config.conf - command: - - sh - configVolumeMounts: - - mountPath: /etc/kuscia/task-config.conf - subPath: task-config.conf - name: secretflow - ports: - - name: ezpsi - port: 54509 - protocol: HTTP - scope: Cluster - workingDir: /work - restartPolicy: Never - image: - id: abc - name: {{.SF_IMAGE_NAME}} - sign: abc - tag: {{.SF_IMAGE_TAG}} \ No newline at end of file diff --git a/scripts/test/suite/core/functions.sh b/scripts/test/suite/core/functions.sh index 4b160c86..8c72cbe5 100644 --- a/scripts/test/suite/core/functions.sh +++ b/scripts/test/suite/core/functions.sh @@ -168,10 +168,10 @@ function start_center_mode() { mkdir -p "${test_suite_run_kuscia_dir}"/master ## generate client certs docker exec -it ${MASTER_CONTAINER} sh scripts/deploy/init_kusciaapi_client_certs.sh - docker cp "${MASTER_CONTAINER}":/home/kuscia/var/tmp/kusciaapi-client.key "${test_suite_run_kuscia_dir}"/master - docker cp "${MASTER_CONTAINER}":/home/kuscia/var/tmp/kusciaapi-client.crt "${test_suite_run_kuscia_dir}"/master - docker cp "${MASTER_CONTAINER}":/home/kuscia/var/tmp/ca.crt "${test_suite_run_kuscia_dir}"/master - docker cp "${MASTER_CONTAINER}":/home/kuscia/var/tmp/token "${test_suite_run_kuscia_dir}"/master + docker cp "${MASTER_CONTAINER}":/home/kuscia/var/certs/kusciaapi-client.key "${test_suite_run_kuscia_dir}"/master + docker cp "${MASTER_CONTAINER}":/home/kuscia/var/certs/kusciaapi-client.crt "${test_suite_run_kuscia_dir}"/master + docker cp "${MASTER_CONTAINER}":/home/kuscia/var/certs/ca.crt "${test_suite_run_kuscia_dir}"/master + docker cp "${MASTER_CONTAINER}":/home/kuscia/var/certs/token "${test_suite_run_kuscia_dir}"/master unset test_suite_run_kuscia_dir master_container_state lite_alice_container_state lite_bob_container_state } @@ -205,16 +205,16 @@ function start_p2p_mode() { mkdir -p "${test_suite_run_kuscia_dir}"/bob ## generate client certs docker exec -it ${AUTONOMY_ALICE_CONTAINER} sh scripts/deploy/init_kusciaapi_client_certs.sh - docker cp 
"${AUTONOMY_ALICE_CONTAINER}":/home/kuscia/var/tmp/kusciaapi-client.key "${test_suite_run_kuscia_dir}"/alice - docker cp "${AUTONOMY_ALICE_CONTAINER}":/home/kuscia/var/tmp/kusciaapi-client.crt "${test_suite_run_kuscia_dir}"/alice - docker cp "${AUTONOMY_ALICE_CONTAINER}":/home/kuscia/var/tmp/ca.crt "${test_suite_run_kuscia_dir}"/alice + docker cp "${AUTONOMY_ALICE_CONTAINER}":/home/kuscia/var/certs/kusciaapi-client.key "${test_suite_run_kuscia_dir}"/alice + docker cp "${AUTONOMY_ALICE_CONTAINER}":/home/kuscia/var/certs/kusciaapi-client.crt "${test_suite_run_kuscia_dir}"/alice + docker cp "${AUTONOMY_ALICE_CONTAINER}":/home/kuscia/var/certs/ca.crt "${test_suite_run_kuscia_dir}"/alice ## generate client certs - docker cp "${AUTONOMY_ALICE_CONTAINER}":/home/kuscia/var/tmp/token "${test_suite_run_kuscia_dir}"/alice + docker cp "${AUTONOMY_ALICE_CONTAINER}":/home/kuscia/var/certs/token "${test_suite_run_kuscia_dir}"/alice docker exec -it ${AUTONOMY_BOB_CONTAINER} sh scripts/deploy/init_kusciaapi_client_certs.sh - docker cp "${AUTONOMY_BOB_CONTAINER}":/home/kuscia/var/tmp/kusciaapi-client.key "${test_suite_run_kuscia_dir}"/bob - docker cp "${AUTONOMY_BOB_CONTAINER}":/home/kuscia/var/tmp/kusciaapi-client.crt "${test_suite_run_kuscia_dir}"/bob - docker cp "${AUTONOMY_BOB_CONTAINER}":/home/kuscia/var/tmp/ca.crt "${test_suite_run_kuscia_dir}"/bob - docker cp "${AUTONOMY_BOB_CONTAINER}":/home/kuscia/var/tmp/token "${test_suite_run_kuscia_dir}"/bob + docker cp "${AUTONOMY_BOB_CONTAINER}":/home/kuscia/var/certs/kusciaapi-client.key "${test_suite_run_kuscia_dir}"/bob + docker cp "${AUTONOMY_BOB_CONTAINER}":/home/kuscia/var/certs/kusciaapi-client.crt "${test_suite_run_kuscia_dir}"/bob + docker cp "${AUTONOMY_BOB_CONTAINER}":/home/kuscia/var/certs/ca.crt "${test_suite_run_kuscia_dir}"/bob + docker cp "${AUTONOMY_BOB_CONTAINER}":/home/kuscia/var/certs/token "${test_suite_run_kuscia_dir}"/bob unset test_suite_run_kuscia_dir autonomy_alice_container_state autonomy_bob_container_state } diff --git a/scripts/tools/register_app_image/ezpsi-image.yaml b/scripts/tools/register_app_image/ezpsi-image.yaml deleted file mode 100644 index 99a55799..00000000 --- a/scripts/tools/register_app_image/ezpsi-image.yaml +++ /dev/null @@ -1,39 +0,0 @@ -apiVersion: kuscia.secretflow/v1alpha1 -kind: AppImage -metadata: - name: {{APP_IMAGE_NAME}} -spec: - configTemplates: - task-config.conf: | - { - "task_id": "{{.TASK_ID}}", - "task_input_config": "{{.TASK_INPUT_CONFIG}}", - "task_cluster_def": "{{.TASK_CLUSTER_DEFINE}}", - "allocated_ports": "{{.ALLOCATED_PORTS}}" - } - deployTemplates: - - name: ezpsi - replicas: 1 - spec: - containers: - - args: - - -c - - /root/main --kuscia /etc/kuscia/task-config.conf - command: - - sh - configVolumeMounts: - - mountPath: /etc/kuscia/task-config.conf - subPath: task-config.conf - name: secretflow - ports: - - name: ezpsi - port: 54509 - protocol: HTTP - scope: Cluster - workingDir: /work - restartPolicy: Never - image: - id: abc - name: {{IMAGE_NAME}} - sign: abc - tag: {{IMAGE_TAG}} \ No newline at end of file diff --git a/scripts/tools/register_app_image/register_app_image.sh b/scripts/tools/register_app_image/register_app_image.sh index b03d34d8..2583c60c 100755 --- a/scripts/tools/register_app_image/register_app_image.sh +++ b/scripts/tools/register_app_image/register_app_image.sh @@ -130,9 +130,10 @@ function apply_appimage_crd(){ APP_IMAGE_NAME_IN_KUSCIA=$(echo ${image_name##*/}-${image_tag} | sed 's/_/-/g') APP_IMAGE_FILE=${DEFAULT_APP_IMAGE_FILE} else - 
APP_IMAGE_FILE="${APP_IMAGE_FILE_DIR}/${APP_IMAGE_NAME_IN_KUSCIA}-image.yaml" + APP_IMAGE_FILE="${APP_IMAGE_FILE_DIR}/${APP_IMAGE_NAME_IN_KUSCIA}.yaml" if [[ ! -f "$APP_IMAGE_FILE" ]]; then - APP_IMAGE_FILE=${DEFAULT_APP_IMAGE_FILE} + echo "=> => $APP_IMAGE_FILE is not exist, register fail" + exit 1 fi fi