From 38bbac20d9e4ab7c6de9e4ac29b3d01097fc0192 Mon Sep 17 00:00:00 2001 From: aaeron Date: Fri, 9 Aug 2024 16:07:22 -0700 Subject: [PATCH] [MESH-5162] - build configurations for state syncer (#730) Co-authored-by: nirvanagit Co-authored-by: Ryan Tay statesyncer changes Signed-off-by: Ryan Tay --- CONTRIBUTING.md | 23 +- admiral/cmd/admiral/cmd/root.go | 2 +- admiral/pkg/clusters/clusterIdentitySyncer.go | 44 -- admiral/pkg/clusters/configSyncer.go | 77 +++ ...itySyncer_test.go => configSyncer_test.go} | 0 admiral/pkg/clusters/configwriter.go | 75 +-- admiral/pkg/clusters/configwriter_test.go | 132 +++-- admiral/pkg/clusters/handler.go | 2 +- admiral/pkg/clusters/mocks_test.go | 44 ++ admiral/pkg/clusters/service_handler.go | 2 +- admiral/pkg/clusters/serviceentry.go | 317 ++++++++--- admiral/pkg/clusters/serviceentry_test.go | 502 ++++++++++++++---- admiral/pkg/clusters/shard_handler.go | 26 +- admiral/pkg/clusters/shard_handler_test.go | 44 +- ...expectedIdentityIdentityConfiguration.json | 65 +++ .../identityIdentityConfiguration.json | 65 +++ ...eshtestblackholeIdentityConfiguration.json | 32 +- ...meshtestinboundsIdentityConfiguration.json | 22 +- .../testdata/sampleIdentityConfiguration.json | 106 ++-- admiral/pkg/clusters/types.go | 19 +- admiral/pkg/clusters/util.go | 119 ++--- admiral/pkg/clusters/util_test.go | 137 ----- admiral/pkg/controller/common/common.go | 83 +++ admiral/pkg/controller/common/common_test.go | 323 +++++++---- admiral/pkg/controller/util/migration.go | 11 +- admiral/pkg/controller/util/migration_test.go | 2 +- admiral/pkg/controller/util/util.go | 10 +- admiral/pkg/mocks/ConfigWriter.go | 52 ++ ...entity_test.go => clusterIdentity_test.go} | 0 admiral/pkg/registry/config.go | 58 ++ admiral/pkg/registry/configCache.go | 72 +++ admiral/pkg/registry/configCache_test.go | 152 ++++++ admiral/pkg/registry/configSyncer.go | 106 +++- admiral/pkg/registry/configWriter.go | 85 +++ .../pkg/registry/mocks/mock_ConfigSyncer.go | 87 +++ admiral/pkg/registry/registry.go | 35 +- admiral/pkg/registry/registry_test.go | 21 +- admiral/pkg/registry/rolloutConfig_parser.go | 15 + .../record1IdentityConfiguration.json | 5 + .../record2IdentityConfiguration.json | 5 + .../testdata/sampleIdentityConfiguration.json | 120 ++--- admiral/pkg/registry/testutils.go | 49 +- admiral/pkg/types/types.go | 11 + admiral/pkg/util/constants.go | 7 + 44 files changed, 2230 insertions(+), 934 deletions(-) delete mode 100644 admiral/pkg/clusters/clusterIdentitySyncer.go create mode 100644 admiral/pkg/clusters/configSyncer.go rename admiral/pkg/clusters/{clusterIdentitySyncer_test.go => configSyncer_test.go} (100%) create mode 100644 admiral/pkg/clusters/mocks_test.go create mode 100644 admiral/pkg/clusters/testdata/expectedIdentityIdentityConfiguration.json create mode 100644 admiral/pkg/clusters/testdata/identityIdentityConfiguration.json create mode 100644 admiral/pkg/mocks/ConfigWriter.go rename admiral/pkg/registry/{clusterdentity_test.go => clusterIdentity_test.go} (100%) create mode 100644 admiral/pkg/registry/config.go create mode 100644 admiral/pkg/registry/configCache.go create mode 100644 admiral/pkg/registry/configCache_test.go create mode 100644 admiral/pkg/registry/configWriter.go create mode 100644 admiral/pkg/registry/mocks/mock_ConfigSyncer.go create mode 100644 admiral/pkg/registry/rolloutConfig_parser.go create mode 100644 admiral/pkg/registry/testdata/record1IdentityConfiguration.json create mode 100644 admiral/pkg/registry/testdata/record2IdentityConfiguration.json create mode 
100644 admiral/pkg/types/types.go diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b4405367..12aa9ba6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,7 +3,7 @@ We welcome contributions :) ## Submitting PRs -* Make sure to check existing issues and file an issue before starting to work on a feature/bug. This will help prevent duplication of work. +* Make sure to check existing issues and file an issue before starting to work on a feature/bug. This will help prevent duplication of work. Also refer to [Collaboration](./README.md) for communication channels. ## Setting up for local Development @@ -20,12 +20,12 @@ $ADMIRAL_HOME/tests/create_cluster.sh 1.16.8 export KUBECONFIG=~/.kube/config ``` * Install [Prerequisites](./docs/Examples.md#Prerequisite) and make sure to install istio control plane in cluster. Alternatively, you can use the script to install istio control plane on the cluster created in previous step: - -Mac: `$ADMIRAL_HOME/tests/install_istio.sh 1.10.4 osx` -Mac (Apple Silicon): `$ADMIRAL_HOME/tests/install_istio.sh 1.7.4 osx-arm64` +Mac: `$ADMIRAL_HOME/tests/install_istio.sh 1.20.2 osx` -Linux: `$ADMIRAL_HOME/tests/install_istio.sh 1.7.4 linux` +Mac (Apple Silicon): `$ADMIRAL_HOME/tests/install_istio.sh 1.20.2 osx-arm64` + +Linux: `$ADMIRAL_HOME/tests/install_istio.sh 1.20.2 linux` * Set up necessary permissions and configurations for Admiral @@ -82,7 +82,7 @@ go install sigs.k8s.io/controller-tools@v0.10.0 go install k8s.io/code-generator v0.24.2 go install google.golang.org/protobuf@v1.28.1 make setup -``` +``` ### Generate `*.pb.go` files from `*.proto` files ```bash @@ -94,7 +94,7 @@ go generate ./... make model-gen ``` -* If you've made changes to protobuf model objects and need to re-generate their clientsets, use following steps and checkin the generated files +* If you've made changes to protobuf model objects and need to re-generate their clientsets, use following steps and checkin the generated files ### Generate clientsets ```bash sh hack/update-codegen.sh @@ -114,12 +114,12 @@ make gen-yaml cd $ADMIRAL_HOME/tests ./run.sh "1.16.8" "1.7.4" "../out" ``` -* Multi-cluster +* Multi-cluster ``` TODO ``` -## Before PR +## Before PR 1. Clone repository 1. Add unit tests and fmea tests(in case applicable) along with the checked in code. 1. Confirm that the unit test coverage did not drop with your change. @@ -127,12 +127,11 @@ TODO 1. Please update any bdd tests in case applicable ## During PR -1. Create Pull Request from your branch to the master branch. +1. Create Pull Request from your branch to the master branch. 1. Make sure the build succeeds 1. Maintainers on Admiral Repository will review the pull request. 1. PR will be merged after code is reviewed and all checks are passing - + ## After PR 1. When merging the PR, ensure that all commits are squashed into a single commit. (This can be done in advance via interactive rebase or through the github UI) 1. Once the changes are deployed to qal environment, verify the fix looks good and bdds are successful. - diff --git a/admiral/cmd/admiral/cmd/root.go b/admiral/cmd/admiral/cmd/root.go index 8509e0d5..bb0466f7 100644 --- a/admiral/cmd/admiral/cmd/root.go +++ b/admiral/cmd/admiral/cmd/root.go @@ -81,7 +81,7 @@ func GetRootCmd(args []string) *cobra.Command { // This is required for PERF tests only. // Perf tests requires remote registry object for validations. 
// There is no way to inject this object - // There is no other away to propogate this object to perf suite + // There is no other way to propagate this object to perf suite if params.KubeconfigPath == loader.FakeKubeconfigPath { cmd.SetContext(context.WithValue(cmd.Context(), "remote-registry", remoteRegistry)) } diff --git a/admiral/pkg/clusters/clusterIdentitySyncer.go b/admiral/pkg/clusters/clusterIdentitySyncer.go deleted file mode 100644 index a9beaa62..00000000 --- a/admiral/pkg/clusters/clusterIdentitySyncer.go +++ /dev/null @@ -1,44 +0,0 @@ -package clusters - -import ( - "fmt" - - "github.com/istio-ecosystem/admiral/admiral/pkg/registry" - log "github.com/sirupsen/logrus" -) - -func updateClusterIdentityCache( - remoteRegistry *RemoteRegistry, - sourceClusters []string, - identity string) error { - - if remoteRegistry == nil { - return fmt.Errorf("remote registry is not initialized") - } - if remoteRegistry.AdmiralCache == nil { - return fmt.Errorf("admiral cache is not initialized") - } - - if remoteRegistry.AdmiralCache.SourceToDestinations == nil { - return fmt.Errorf("source to destination cache is not populated") - } - // find assets this identity needs to call - destinationAssets := remoteRegistry.AdmiralCache.SourceToDestinations.Get(identity) - for _, cluster := range sourceClusters { - sourceClusterIdentity := registry.NewClusterIdentity(identity, true) - err := remoteRegistry.ClusterIdentityStoreHandler.AddUpdateIdentityToCluster(sourceClusterIdentity, cluster) - if err != nil { - return err - } - for _, destinationAsset := range destinationAssets { - destinationClusterIdentity := registry.NewClusterIdentity(destinationAsset, false) - err := remoteRegistry.ClusterIdentityStoreHandler.AddUpdateIdentityToCluster(destinationClusterIdentity, cluster) - if err != nil { - return err - } - } - } - log.Infof("source asset=%s is present in clusters=%v, and has destinations=%v", - identity, sourceClusters, destinationAssets) - return nil -} diff --git a/admiral/pkg/clusters/configSyncer.go b/admiral/pkg/clusters/configSyncer.go new file mode 100644 index 00000000..cab23a9b --- /dev/null +++ b/admiral/pkg/clusters/configSyncer.go @@ -0,0 +1,77 @@ +package clusters + +import ( + "fmt" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/util" + "github.com/pkg/errors" + + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + "github.com/istio-ecosystem/admiral/admiral/pkg/registry" + "github.com/sirupsen/logrus" +) + +func updateClusterIdentityCache( + remoteRegistry *RemoteRegistry, + sourceClusters []string, + identity string) error { + + if remoteRegistry == nil { + return fmt.Errorf("remote registry is not initialized") + } + if remoteRegistry.AdmiralCache == nil { + return fmt.Errorf("admiral cache is not initialized") + } + + if remoteRegistry.AdmiralCache.SourceToDestinations == nil { + return fmt.Errorf("source to destination cache is not populated") + } + // find assets this identity needs to call + destinationAssets := remoteRegistry.AdmiralCache.SourceToDestinations.Get(identity) + for _, cluster := range sourceClusters { + sourceClusterIdentity := registry.NewClusterIdentity(identity, true) + err := remoteRegistry.ClusterIdentityStoreHandler.AddUpdateIdentityToCluster(sourceClusterIdentity, cluster) + if err != nil { + return err + } + for _, destinationAsset := range destinationAssets { + destinationClusterIdentity := registry.NewClusterIdentity(destinationAsset, false) + err := 
remoteRegistry.ClusterIdentityStoreHandler.AddUpdateIdentityToCluster(destinationClusterIdentity, cluster) + if err != nil { + return err + } + } + } + logrus.Infof("source asset=%s is present in clusters=%v, and has destinations=%v", + identity, sourceClusters, destinationAssets) + return nil +} + +func updateRegistryConfigForClusterPerEnvironment(ctxLogger *logrus.Entry, remoteRegistry *RemoteRegistry, registryConfig registry.IdentityConfig) error { + task := "updateRegistryConfigForClusterPerEnvironment" + defer util.LogElapsedTimeForTask(ctxLogger, task, registryConfig.IdentityName, "", "", "processingTime")() + k8sClient, err := remoteRegistry.ClientLoader.LoadKubeClientFromPath(common.GetKubeconfigPath()) + if err != nil && common.GetSecretFilterTags() == "admiral/syncrtay" { + ctxLogger.Infof(common.CtxLogFormat, task, registryConfig.IdentityName, "", "", "unable to get kube client") + return errors.Wrap(err, "unable to get kube client") + } + for _, clusterConfig := range registryConfig.Clusters { + clusterName := clusterConfig.Name + ctxLogger.Infof(common.CtxLogFormat, task, registryConfig.IdentityName, "", clusterName, "processing cluster") + for _, environmentConfig := range clusterConfig.Environment { + environmentName := environmentConfig.Name + ctxLogger.Infof(common.CtxLogFormat, task, registryConfig.IdentityName, "", clusterName, "processing environment="+environmentName) + err := remoteRegistry.ConfigSyncer.UpdateEnvironmentConfigByCluster( + ctxLogger, + environmentName, + clusterName, + registryConfig, + registry.NewConfigMapWriter(k8sClient, ctxLogger), + ) + if err != nil { + ctxLogger.Errorf(common.CtxLogFormat, task, registryConfig.IdentityName, "", clusterName, "processing environment="+environmentName+" error="+err.Error()) + return err + } + } + } + return nil +} diff --git a/admiral/pkg/clusters/clusterIdentitySyncer_test.go b/admiral/pkg/clusters/configSyncer_test.go similarity index 100% rename from admiral/pkg/clusters/clusterIdentitySyncer_test.go rename to admiral/pkg/clusters/configSyncer_test.go diff --git a/admiral/pkg/clusters/configwriter.go b/admiral/pkg/clusters/configwriter.go index 49e5709b..85c5ca1a 100644 --- a/admiral/pkg/clusters/configwriter.go +++ b/admiral/pkg/clusters/configwriter.go @@ -1,16 +1,15 @@ package clusters import ( - "errors" + "sort" + "strconv" + "strings" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" "github.com/istio-ecosystem/admiral/admiral/pkg/registry" - "github.com/istio-ecosystem/admiral/admiral/pkg/util" "github.com/sirupsen/logrus" networkingV1Alpha3 "istio.io/api/networking/v1alpha3" - "sort" - "strconv" - "strings" ) // IstioSEBuilder is an interface to construct Service Entry objects @@ -54,14 +53,10 @@ func (b *ServiceEntryBuilder) BuildServiceEntriesFromIdentityConfig(ctxLogger *l if err != nil { return serviceEntries, err } - ports, err := getServiceEntryPorts(identityConfigEnvironment) - if err != nil { - return serviceEntries, err - } if se, ok := seMap[env]; !ok { tmpSe = &networkingV1Alpha3.ServiceEntry{ Hosts: []string{common.GetCnameVal([]string{env, strings.ToLower(identity), common.GetHostnameSuffix()})}, - Ports: ports, + Ports: identityConfigEnvironment.Ports, Location: networkingV1Alpha3.ServiceEntry_MESH_INTERNAL, Resolution: networkingV1Alpha3.ServiceEntry_DNS, SubjectAltNames: []string{common.SpiffePrefix + common.GetSANPrefix() + common.Slash + identity}, @@ -72,6 +67,7 @@ func (b *ServiceEntryBuilder) 
BuildServiceEntriesFromIdentityConfig(ctxLogger *l tmpSe = se tmpSe.Endpoints = append(tmpSe.Endpoints, ep) } + sort.Sort(WorkloadEntrySorted(tmpSe.Endpoints)) serviceEntries = append(serviceEntries, tmpSe) } } @@ -80,7 +76,7 @@ func (b *ServiceEntryBuilder) BuildServiceEntriesFromIdentityConfig(ctxLogger *l // getIngressEndpoints constructs the endpoint of the ingress gateway/remote endpoint for an identity // by reading the information directly from the IdentityConfigCluster. -func getIngressEndpoints(clusters []registry.IdentityConfigCluster) (map[string]*networkingV1Alpha3.WorkloadEntry, error) { +func getIngressEndpoints(clusters map[string]*registry.IdentityConfigCluster) (map[string]*networkingV1Alpha3.WorkloadEntry, error) { ingressEndpoints := map[string]*networkingV1Alpha3.WorkloadEntry{} var err error for _, cluster := range clusters { @@ -99,46 +95,32 @@ func getIngressEndpoints(clusters []registry.IdentityConfigCluster) (map[string] return ingressEndpoints, err } -// getServiceEntryPorts constructs the ServicePorts of the service entry that should be built -// for the given identityConfigEnvironment. -func getServiceEntryPorts(identityConfigEnvironment registry.IdentityConfigEnvironment) ([]*networkingV1Alpha3.ServicePort, error) { - port := &networkingV1Alpha3.ServicePort{Number: uint32(common.DefaultServiceEntryPort), Name: util.Http, Protocol: util.Http} - var err error - if len(identityConfigEnvironment.Ports) == 0 { - err = errors.New("identityConfigEnvironment had no ports for: " + identityConfigEnvironment.Name) - } - for _, servicePort := range identityConfigEnvironment.Ports { - //TODO: 8090 is supposed to be set as the common.SidecarEnabledPorts (includeInboundPorts) which we check that in the rollout, but we don't have that information here so assume it is 8090 - if servicePort.TargetPort.IntValue() == 8090 { - protocol := util.GetPortProtocol(servicePort.Name) - port.Name = protocol - port.Protocol = protocol - } - } - ports := []*networkingV1Alpha3.ServicePort{port} - return ports, err -} - // getServiceEntryEndpoint constructs the remote or local endpoints of the service entry that // should be built for the given identityConfigEnvironment. 
-func getServiceEntryEndpoint(ctxLogger *logrus.Entry, clientCluster string, serverCluster string, ingressEndpoints map[string]*networkingV1Alpha3.WorkloadEntry, identityConfigEnvironment registry.IdentityConfigEnvironment) (*networkingV1Alpha3.WorkloadEntry, error) { +func getServiceEntryEndpoint( + ctxLogger *logrus.Entry, + clientCluster string, + serverCluster string, + ingressEndpoints map[string]*networkingV1Alpha3.WorkloadEntry, + identityConfigEnvironment *registry.IdentityConfigEnvironment) (*networkingV1Alpha3.WorkloadEntry, error) { //TODO: Verify Local and Remote Endpoints are constructed correctly var err error endpoint := ingressEndpoints[serverCluster] tmpEp := endpoint.DeepCopy() tmpEp.Labels["type"] = identityConfigEnvironment.Type if clientCluster == serverCluster { - //Local Endpoint Address if the identity is deployed on the same cluster as it's client and the endpoint is the remote endpoint for the cluster - tmpEp.Address = identityConfigEnvironment.ServiceName + common.Sep + identityConfigEnvironment.Namespace + common.GetLocalDomainSuffix() - for _, servicePort := range identityConfigEnvironment.Ports { - //There should only be one mesh port here (http-service-mesh), but we are preserving ability to have multiple ports - protocol := util.GetPortProtocol(servicePort.Name) - if _, ok := tmpEp.Ports[protocol]; ok { - tmpEp.Ports[protocol] = uint32(servicePort.Port) - ctxLogger.Infof(common.CtxLogFormat, "LocalMeshPort", servicePort.Port, "", serverCluster, "Protocol: "+protocol) - } else { - err = errors.New("failed to get Port for protocol: " + protocol) + for _, service := range identityConfigEnvironment.Services { + if service.Weight == -1 { + // it's not a weighted service, which means we should have only one service endpoint + //Local Endpoint Address if the identity is deployed on the same cluster as its client and the endpoint is the remote endpoint for the cluster + tmpEp.Address = service.Name + common.Sep + identityConfigEnvironment.Namespace + common.GetLocalDomainSuffix() + tmpEp.Ports = service.Ports + break } + // TODO: this needs fixing, because there can be multiple services when we have weighted endpoints, and this + // will only choose one service + tmpEp.Address = service.Name + common.Sep + identityConfigEnvironment.Namespace + common.GetLocalDomainSuffix() + tmpEp.Ports = service.Ports } } return tmpEp, err @@ -147,15 +129,16 @@ func getServiceEntryEndpoint(ctxLogger *logrus.Entry, clientCluster string, serv // getExportTo constructs a sorted list of unique namespaces for a given cluster, client assets, // and cname, where each namespace is where a client asset of the cname is deployed on the cluster. If the cname // is also deployed on the cluster then the istio-system namespace is also in the list. 
-func getExportTo(ctxLogger *logrus.Entry, registryClient registry.IdentityConfiguration, clientCluster string, isServerOnClientCluster bool, clientAssets []map[string]string) ([]string, error) { +func getExportTo(ctxLogger *logrus.Entry, registryClient registry.IdentityConfiguration, clientCluster string, isServerOnClientCluster bool, clientAssets map[string]string) ([]string, error) { clientNamespaces := []string{} var err error var clientIdentityConfig registry.IdentityConfig - for _, clientAsset := range clientAssets { + for clientAsset := range clientAssets { // For each client asset of cname, we fetch its identityConfig - clientIdentityConfig, err = registryClient.GetIdentityConfigByIdentityName(clientAsset["name"], ctxLogger) + clientIdentityConfig, err = registryClient.GetIdentityConfigByIdentityName(clientAsset, ctxLogger) if err != nil { - ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", clientAsset["name"], common.GetSyncNamespace(), "", "could not fetch IdentityConfig: "+err.Error()) + // TODO: this should return an error. + ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", clientAsset, common.GetSyncNamespace(), "", "could not fetch IdentityConfig: "+err.Error()) continue } for _, clientIdentityConfigCluster := range clientIdentityConfig.Clusters { diff --git a/admiral/pkg/clusters/configwriter_test.go b/admiral/pkg/clusters/configwriter_test.go index 388152f1..c12d0665 100644 --- a/admiral/pkg/clusters/configwriter_test.go +++ b/admiral/pkg/clusters/configwriter_test.go @@ -3,11 +3,13 @@ package clusters import ( "context" "reflect" + "sort" "strings" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" "github.com/istio-ecosystem/admiral/admiral/pkg/registry" "github.com/istio-ecosystem/admiral/admiral/pkg/util" @@ -65,15 +67,17 @@ func createMockServiceEntry(env string, identity string, endpointAddress string, func TestGetIngressEndpoints(t *testing.T) { identityConfig := registry.GetSampleIdentityConfig() - expectedIngressEndpoints := map[string]*networkingV1Alpha3.WorkloadEntry{"cg-tax-ppd-usw2-k8s": { - Address: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", - Locality: "us-west-2", - Ports: map[string]uint32{"http": uint32(15443)}, - Labels: map[string]string{"security.istio.io/tlsMode": "istio"}, - }} + expectedIngressEndpoints := map[string]*networkingV1Alpha3.WorkloadEntry{ + "cluster1": { + Address: "abc-elb.us-west-2.elb.amazonaws.com.", + Locality: "us-west-2", + Ports: map[string]uint32{"http": uint32(15443)}, + Labels: map[string]string{"security.istio.io/tlsMode": "istio"}, + }, + } testCases := []struct { name string - identityConfigClusters []registry.IdentityConfigCluster + identityConfigClusters map[string]*registry.IdentityConfigCluster expectedIngressEndpoints map[string]*networkingV1Alpha3.WorkloadEntry }{ { @@ -87,38 +91,10 @@ func TestGetIngressEndpoints(t *testing.T) { t.Run(c.name, func(t *testing.T) { ingressEndpoints, err := getIngressEndpoints(c.identityConfigClusters) if err != nil { - t.Errorf("While constructing ingressEndpoint, got error: %v", err) + t.Errorf("want=nil, got=%v", err) } if !reflect.DeepEqual(ingressEndpoints, c.expectedIngressEndpoints) { - t.Errorf("Mismatch between constructed ingressEndpoint and expected ingressEndpoint") - } - }) - } -} - -func TestGetServiceEntryPorts(t *testing.T) { - e2eEnv := 
registry.GetSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e") - expectedSEPorts := []*networkingV1Alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort), Name: util.Http, Protocol: util.Http}} - testCases := []struct { - name string - identityConfigEnvironment registry.IdentityConfigEnvironment - expectedSEPorts []*networkingV1Alpha3.ServicePort - }{ - { - name: "Given an IdentityConfigEnvironment, " + - "Then the constructed ServiceEntryPorts should be as expected", - identityConfigEnvironment: e2eEnv, - expectedSEPorts: expectedSEPorts, - }, - } - for _, c := range testCases { - t.Run(c.name, func(t *testing.T) { - sePorts, err := getServiceEntryPorts(e2eEnv) - if err != nil { - t.Errorf("While constructing serviceEntryPorts, got error: %v", err) - } - if !reflect.DeepEqual(sePorts, c.expectedSEPorts) { - t.Errorf("Mismatch between constructed ingressEndpoint and expected ingressEndpoint") + t.Errorf("want=%v, got=%v", c.expectedIngressEndpoints, ingressEndpoints) } }) } @@ -128,26 +104,26 @@ func TestGetServiceEntryEndpoint(t *testing.T) { admiralParams := admiralParamsForConfigWriterTests() common.ResetSync() common.InitializeConfig(admiralParams) - e2eEnv := registry.GetSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e") - ingressEndpoints := map[string]*networkingV1Alpha3.WorkloadEntry{"cg-tax-ppd-usw2-k8s": { - Address: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + e2eEnv := registry.GetSampleIdentityConfigEnvironment("e2e", "ns-1-usw2-e2e") + ingressEndpoints := map[string]*networkingV1Alpha3.WorkloadEntry{"cluster1": { + Address: "abc-elb.us-west-2.elb.amazonaws.com.", Locality: "us-west-2", Ports: map[string]uint32{"http": uint32(15443)}, Labels: map[string]string{"security.istio.io/tlsMode": "istio"}, }, "apigw-cx-ppd-usw2-k8s": { - Address: "internal-a1cbfde75adbe1fed9763495dfd07960-2123389388.us-west-2.elb.amazonaws.com.", + Address: "abc-elb.us-west-2.elb.amazonaws.com.", Locality: "us-west-2", Ports: map[string]uint32{"http": uint32(15443)}, Labels: map[string]string{"security.istio.io/tlsMode": "istio"}, }} remoteEndpoint := &networkingV1Alpha3.WorkloadEntry{ - Address: "internal-a1cbfde75adbe1fed9763495dfd07960-2123389388.us-west-2.elb.amazonaws.com.", + Address: "abc-elb.us-west-2.elb.amazonaws.com.", Locality: "us-west-2", Ports: map[string]uint32{"http": uint32(15443)}, Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}, } localEndpoint := &networkingV1Alpha3.WorkloadEntry{ - Address: "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-e2e.svc.cluster.local.", + Address: "app-1-spk-root-service.ns-1-usw2-e2e.svc.cluster.local.", Locality: "us-west-2", Ports: map[string]uint32{"http": uint32(8090)}, Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}, @@ -156,7 +132,7 @@ func TestGetServiceEntryEndpoint(t *testing.T) { ctxLogger := common.GetCtxLogger(ctx, "ctg-taxprep-partnerdatatotax", "") testCases := []struct { name string - identityConfigEnvironment registry.IdentityConfigEnvironment + identityConfigEnvironment *registry.IdentityConfigEnvironment ingressEndpoints map[string]*networkingV1Alpha3.WorkloadEntry clientCluster string serverCluster string @@ -168,7 +144,7 @@ func TestGetServiceEntryEndpoint(t *testing.T) { "Then the constructed endpoint should be a remote endpoint", identityConfigEnvironment: e2eEnv, ingressEndpoints: ingressEndpoints, - clientCluster: 
"cg-tax-ppd-usw2-k8s", + clientCluster: "cluster1", serverCluster: "apigw-cx-ppd-usw2-k8s", expectedSEEndpoint: remoteEndpoint, }, @@ -178,8 +154,8 @@ func TestGetServiceEntryEndpoint(t *testing.T) { "Then the constructed endpoint should be a local endpoint", identityConfigEnvironment: e2eEnv, ingressEndpoints: ingressEndpoints, - clientCluster: "cg-tax-ppd-usw2-k8s", - serverCluster: "cg-tax-ppd-usw2-k8s", + clientCluster: "cluster1", + serverCluster: "cluster1", expectedSEEndpoint: localEndpoint, }, } @@ -187,12 +163,11 @@ func TestGetServiceEntryEndpoint(t *testing.T) { t.Run(c.name, func(t *testing.T) { seEndpoint, err := getServiceEntryEndpoint(ctxLogger, c.clientCluster, c.serverCluster, c.ingressEndpoints, c.identityConfigEnvironment) if err != nil { - t.Errorf("While constructing serviceEntryPortEndpoint, got error: %v", err) + t.Errorf("want=nil, got=%v", err) } opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.WorkloadEntry{}) if !cmp.Equal(seEndpoint, c.expectedSEEndpoint, opts) { - t.Errorf("Mismatch between constructed ingressEndpoint and expected ingressEndpoint") - t.Errorf(cmp.Diff(seEndpoint, c.expectedSEEndpoint, opts)) + t.Errorf("want=%v, got=%v", c.expectedSEEndpoint, seEndpoint) } }) } @@ -202,13 +177,13 @@ func TestGetExportTo(t *testing.T) { admiralParams := admiralParamsForConfigWriterTests() common.ResetSync() common.InitializeConfig(admiralParams) - ctxLogger := common.GetCtxLogger(context.Background(), "ctg-taxprep-partnerdatatotax", "") + ctxLogger := common.GetCtxLogger(context.Background(), "test", "") testCases := []struct { name string registryClient registry.IdentityConfiguration clientCluster string isServerOnClientCluster bool - clientAssets []map[string]string + clientAssets map[string]string expectedNamespaces []string }{ { @@ -216,31 +191,30 @@ func TestGetExportTo(t *testing.T) { "When the client cluster is the same as the server cluster" + "Then the constructed dependent namespaces should include istio-system", registryClient: registry.NewRegistryClient(registry.WithRegistryEndpoint("PLACEHOLDER")), - clientCluster: "cg-tax-ppd-usw2-k8s", + clientCluster: "cluster1", isServerOnClientCluster: true, - clientAssets: []map[string]string{{"name": "sample"}}, - expectedNamespaces: []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "ctg-taxprep-partnerdatatotax-usw2-prf", "ctg-taxprep-partnerdatatotax-usw2-qal", "istio-system"}, + clientAssets: map[string]string{"sample": "sample"}, + expectedNamespaces: []string{"istio-system", "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}, }, { name: "Given asset info, cluster info, and client info, " + "When the client cluster is not the same as the server cluster" + "Then the constructed dependent namespaces should not include istio-system", registryClient: registry.NewRegistryClient(registry.WithRegistryEndpoint("PLACEHOLDER")), - clientCluster: "cg-tax-ppd-usw2-k8s", + clientCluster: "cluster1", isServerOnClientCluster: false, - clientAssets: []map[string]string{{"name": "sample"}}, - expectedNamespaces: []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "ctg-taxprep-partnerdatatotax-usw2-prf", "ctg-taxprep-partnerdatatotax-usw2-qal"}, + clientAssets: map[string]string{"sample": "sample"}, + expectedNamespaces: []string{"ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}, }, } for _, c := range testCases { t.Run(c.name, func(t *testing.T) { namespaces, err := getExportTo(ctxLogger, c.registryClient, c.clientCluster, c.isServerOnClientCluster, c.clientAssets) if err != nil { - t.Errorf("While constructing sorted 
dependent namespaces, got error: %v", err) + t.Errorf("want=%v, got=%v", nil, err) } if !cmp.Equal(namespaces, c.expectedNamespaces) { - t.Errorf("Mismatch between constructed sortedDependentNamespaces and expected sortedDependentNamespaces") - t.Errorf(cmp.Diff(namespaces, c.expectedNamespaces)) + t.Errorf("want=%v, got=%v", c.expectedNamespaces, namespaces) } }) } @@ -251,15 +225,16 @@ func TestBuildServiceEntriesFromIdentityConfig(t *testing.T) { common.ResetSync() common.InitializeConfig(admiralParams) rr, _ := InitAdmiralOperator(context.Background(), admiralParams) - ctxLogger := common.GetCtxLogger(context.Background(), "ctg-taxprep-partnerdatatotax", "") + ctxLogger := common.GetCtxLogger(context.Background(), "test", "") identityConfig := registry.GetSampleIdentityConfig() - expectedLocalServiceEntryprf := createMockServiceEntry("prf", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-prf.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "ctg-taxprep-partnerdatatotax-usw2-prf", "ctg-taxprep-partnerdatatotax-usw2-qal", "istio-system"}) - expectedLocalServiceEntrye2e := createMockServiceEntry("e2e", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-e2e.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "ctg-taxprep-partnerdatatotax-usw2-prf", "ctg-taxprep-partnerdatatotax-usw2-qal", "istio-system"}) - expectedLocalServiceEntryqal := createMockServiceEntry("qal", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-qal.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "ctg-taxprep-partnerdatatotax-usw2-prf", "ctg-taxprep-partnerdatatotax-usw2-qal", "istio-system"}) - expectedLocalServiceEntries := []*networkingV1Alpha3.ServiceEntry{&expectedLocalServiceEntryprf, &expectedLocalServiceEntrye2e, &expectedLocalServiceEntryqal} + expectedLocalServiceEntryPRF := createMockServiceEntry("prf", "sample", "app-1-spk-root-service.ns-1-usw2-prf.svc.cluster.local.", 8090, []string{"istio-system", "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}) + expectedLocalServiceEntryE2E := createMockServiceEntry("e2e", "sample", "app-1-spk-root-service.ns-1-usw2-e2e.svc.cluster.local.", 8090, []string{"istio-system", "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}) + expectedLocalServiceEntryQAL := createMockServiceEntry("qal", "sample", "app-1-spk-root-service.ns-1-usw2-qal.svc.cluster.local.", 8090, []string{"istio-system", "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}) + expectedLocalServiceEntries := []*networkingV1Alpha3.ServiceEntry{&expectedLocalServiceEntryQAL, &expectedLocalServiceEntryPRF, &expectedLocalServiceEntryE2E} testCases := []struct { name string clientCluster string + event admiral.EventType identityConfig registry.IdentityConfig expectedServiceEntries []*networkingV1Alpha3.ServiceEntry }{ @@ -267,7 +242,8 @@ func TestBuildServiceEntriesFromIdentityConfig(t *testing.T) { name: "Given information to build an se, " + "When the client cluster is the same as the server cluster" + "Then the constructed se should have local endpoint and istio-system in exportTo", - clientCluster: "cg-tax-ppd-usw2-k8s", + clientCluster: "cluster1", + event: admiral.Add, identityConfig: identityConfig, expectedServiceEntries: expectedLocalServiceEntries, }, @@ -277,13 +253,29 @@ func TestBuildServiceEntriesFromIdentityConfig(t *testing.T) { 
serviceEntryBuilder := ServiceEntryBuilder{ClientCluster: c.clientCluster, RemoteRegistry: rr} serviceEntries, err := serviceEntryBuilder.BuildServiceEntriesFromIdentityConfig(ctxLogger, c.identityConfig) if err != nil { - t.Errorf("While constructing service entries, got error: %v", err) + t.Errorf("want=%v, \ngot=%v", nil, err) } opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.ServiceEntry{}, networkingV1Alpha3.ServicePort{}, networkingV1Alpha3.WorkloadEntry{}) + // sort the service entries by name + sort.Sort(ServiceEntryListSorterByHost(serviceEntries)) + sort.Sort(ServiceEntryListSorterByHost(c.expectedServiceEntries)) if !cmp.Equal(serviceEntries, c.expectedServiceEntries, opts) { - t.Errorf("Mismatch between constructed sorted entries and expected service entries") - t.Errorf(cmp.Diff(serviceEntries, c.expectedServiceEntries, opts)) + t.Errorf("want=%v, \ngot=%v", c.expectedServiceEntries, serviceEntries) } }) } } + +type ServiceEntryListSorterByHost []*networkingV1Alpha3.ServiceEntry + +func (s ServiceEntryListSorterByHost) Len() int { + return len(s) +} + +func (w ServiceEntryListSorterByHost) Less(i, j int) bool { + return w[i].Hosts[0] < w[j].Hosts[0] +} + +func (w ServiceEntryListSorterByHost) Swap(i, j int) { + w[i], w[j] = w[j], w[i] +} diff --git a/admiral/pkg/clusters/handler.go b/admiral/pkg/clusters/handler.go index 10897ae6..c756c2c4 100644 --- a/admiral/pkg/clusters/handler.go +++ b/admiral/pkg/clusters/handler.go @@ -94,7 +94,7 @@ func getServiceForDeployment(rc *RemoteController, deployment *appsV1.Deployment var match = common.IsServiceMatch(service.Spec.Selector, deployment.Spec.Selector) //make sure the service matches the deployment Selector and also has a mesh port in the port spec if match { - ports := GetMeshPortsForDeployments(rc.ClusterID, service, deployment) + ports := common.GetMeshPortsForDeployments(rc.ClusterID, service, deployment) if len(ports) > 0 { matchedService = service break diff --git a/admiral/pkg/clusters/mocks_test.go b/admiral/pkg/clusters/mocks_test.go new file mode 100644 index 00000000..8cc4b253 --- /dev/null +++ b/admiral/pkg/clusters/mocks_test.go @@ -0,0 +1,44 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+package clusters + +import ( + context "context" + logrus "github.com/sirupsen/logrus" + mock "github.com/stretchr/testify/mock" + v1alpha3 "istio.io/api/networking/v1alpha3" +) + +// ConfigWriter is an autogenerated mock type for the ConfigWriter type +type ConfigWriterMock struct { + mock.Mock +} + +// AddServiceEntriesWithDrToAllCluster provides a mock function with given fields: ctxLogger, ctx, rr, sourceClusters, serviceEntries, isAdditionalEndpointsEnabled, isServiceEntryModifyCalledForSourceCluster, identityId, env +func (_m *ConfigWriterMock) AddServiceEntriesWithDrToAllCluster(ctxLogger *logrus.Entry, ctx context.Context, rr *RemoteRegistry, sourceClusters map[string]string, serviceEntries map[string]*v1alpha3.ServiceEntry, isAdditionalEndpointsEnabled bool, isServiceEntryModifyCalledForSourceCluster bool, identityId string, env string) error { + ret := _m.Called(ctxLogger, ctx, rr, sourceClusters, serviceEntries, isAdditionalEndpointsEnabled, isServiceEntryModifyCalledForSourceCluster, identityId, env) + + if len(ret) == 0 { + panic("no return value specified for AddServiceEntriesWithDrToAllCluster") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*logrus.Entry, context.Context, *RemoteRegistry, map[string]string, map[string]*v1alpha3.ServiceEntry, bool, bool, string, string) error); ok { + r0 = rf(ctxLogger, ctx, rr, sourceClusters, serviceEntries, isAdditionalEndpointsEnabled, isServiceEntryModifyCalledForSourceCluster, identityId, env) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewConfigWriter creates a new instance of ConfigWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConfigWriterMock(t interface { + mock.TestingT + Cleanup(func()) +}) *ConfigWriterMock { + mock := &ConfigWriterMock{} + mock.Mock.Test(t) + t.Cleanup(func() { mock.AssertExpectations(t) }) + return mock +} diff --git a/admiral/pkg/clusters/service_handler.go b/admiral/pkg/clusters/service_handler.go index 874ebd58..2a3f1d30 100644 --- a/admiral/pkg/clusters/service_handler.go +++ b/admiral/pkg/clusters/service_handler.go @@ -243,7 +243,7 @@ func checkIfThereAreMultipleMatchingServices(svc *coreV1.Service, serviceControl if !ok { return false, fmt.Errorf("type assertion failed, %v is not of type *v1.Deployment", obj) } - ports = GetMeshPortsForDeployments(clusterName, service, &deployment) + ports = common.GetMeshPortsForDeployments(clusterName, service, &deployment) } else { rollout, ok := obj.(rolloutsV1Alpha1.Rollout) if !ok { diff --git a/admiral/pkg/clusters/serviceentry.go b/admiral/pkg/clusters/serviceentry.go index e4af10dd..aa01f46a 100644 --- a/admiral/pkg/clusters/serviceentry.go +++ b/admiral/pkg/clusters/serviceentry.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "github.com/istio-ecosystem/admiral/admiral/pkg/registry" commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util" "gopkg.in/yaml.v2" @@ -55,9 +56,16 @@ const ( gtpManagerMeshAgentFieldValue = "ewok-mesh-agent" ) -func createServiceEntryForDeployment(ctxLogger *logrus.Entry, ctx context.Context, event admiral.EventType, rc *RemoteController, admiralCache *AdmiralCache, - meshPorts map[string]uint32, destDeployment *k8sAppsV1.Deployment, serviceEntries map[string]*networking.ServiceEntry) (*networking.ServiceEntry, error) { - defer util.LogElapsedTimeForModifySE(ctxLogger, "createServiceEntryForDeployment", "", "", "", "")() +func createServiceEntryForDeployment( + ctxLogger 
*logrus.Entry, + ctx context.Context, + event admiral.EventType, + rc *RemoteController, + admiralCache *AdmiralCache, + meshPorts map[string]uint32, + destDeployment *k8sAppsV1.Deployment, + serviceEntries map[string]*networking.ServiceEntry) (*networking.ServiceEntry, error) { + defer util.LogElapsedTimeForTask(ctxLogger, "createServiceEntryForDeployment", "", "", "", "")() workloadIdentityKey := common.GetWorkloadIdentifier() globalFqdn := common.GetCname(destDeployment, workloadIdentityKey, common.GetHostnameSuffix()) @@ -67,7 +75,7 @@ func createServiceEntryForDeployment(ctxLogger *logrus.Entry, ctx context.Contex if err != nil { return nil, err } - util.LogElapsedTimeSinceForModifySE(ctxLogger, "GetUniqueAddress", + util.LogElapsedTimeSinceTask(ctxLogger, "GetUniqueAddress", "", "", rc.ClusterID, "", start) if !common.DisableIPGeneration() && len(address) == 0 { @@ -90,7 +98,7 @@ func modifyServiceEntryForNewServiceOrPod( sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*networking.ServiceEntry, error) { ctxLogger := common.GetCtxLogger(ctx, sourceIdentity, env) ctxLogger.Infof(common.CtxLogFormat, "event", "", "", "", "received") - defer util.LogElapsedTimeForModifySE(ctxLogger, "event", "", "", "", "TotalModifySETime")() + defer util.LogElapsedTimeForTask(ctxLogger, "event", "", "", "", "TotalModifySETime")() var modifySEerr error var isServiceEntryModifyCalledForSourceCluster bool totalConfigWriterEvents.Increment(api.WithAttributes( @@ -102,7 +110,7 @@ func modifyServiceEntryForNewServiceOrPod( partitionedIdentity := sourceIdentity sourceIdentity = getNonPartitionedIdentity(remoteRegistry.AdmiralCache, sourceIdentity) if remoteRegistry.ServiceEntrySuspender.SuspendUpdate(sourceIdentity, env) { - ctxLogger.Infof(common.CtxLogFormat, event, "", "", sourceIdentity, env, "", + ctxLogger.Infof(common.CtxLogFormat, "SuspenderCheck", "", "", "processing skipped as service entry update is suspended for identity") return nil, fmt.Errorf("processing skipped as service entry update is suspended for identity %s in environment %s", sourceIdentity, env) } @@ -116,6 +124,11 @@ func modifyServiceEntryForNewServiceOrPod( ctxLogger.Infof(common.CtxLogFormat, event, "", "", "", "processing skipped during cache warm up state") return nil, fmt.Errorf(common.CtxLogFormat, event, env, sourceIdentity, "", "processing skipped during cache warm up state for env="+env+" identity="+sourceIdentity) } + + registryConfig := registry.IdentityConfig{ + IdentityName: sourceIdentity, + Clusters: make(map[string]*registry.IdentityConfigCluster), + } ctxLogger.Infof(common.CtxLogFormat, event, "", "", "", "processing") var ( cname string @@ -191,6 +204,17 @@ func modifyServiceEntryForNewServiceOrPod( ctxLogger.Infof(common.CtxLogFormat, "event", "", "", clusterId, "neither deployment nor rollouts found") continue } + ingressEndpoint, port := getIngressEndpointAndPort(rc) + + registryConfig.Clusters[clusterId] = ®istry.IdentityConfigCluster{ + Name: clusterId, + Locality: getLocality(rc), + IngressEndpoint: ingressEndpoint, + IngressPort: strconv.Itoa(port), + Environment: map[string]*registry.IdentityConfigEnvironment{ + env: ®istry.IdentityConfigEnvironment{}, + }, + } // For Deployment <-> Rollout migration // Check the type of the application and set the required variables. 
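The hunk above threads a registry.IdentityConfig through modifyServiceEntryForNewServiceOrPod: one IdentityConfigCluster entry per source cluster, each holding a per-environment IdentityConfigEnvironment that later hunks fill with ports, services, and traffic policy. Below is a minimal, self-contained sketch of that assembly pattern. The stand-in types mirror only the fields this patch uses from the registry package; buildRegistryConfig and the ingress values are hypothetical illustrations, not Admiral API.

```go
package main

import "fmt"

// Stand-ins mirroring the fields this patch adds to the registry package.
// Only the shape is taken from the patch; everything else is illustrative.
type RegistryServiceConfig struct {
	Name   string
	Weight int32
	Ports  map[string]uint32
}

type IdentityConfigEnvironment struct {
	Name      string
	Namespace string
	Type      string // common.Deployment or common.Rollout in the real code
	Services  map[string]*RegistryServiceConfig
}

type IdentityConfigCluster struct {
	Name            string
	Locality        string
	IngressEndpoint string
	IngressPort     string
	Environment     map[string]*IdentityConfigEnvironment
}

type IdentityConfig struct {
	IdentityName string
	Clusters     map[string]*IdentityConfigCluster
}

// buildRegistryConfig (hypothetical) shows the per-cluster, per-environment
// bookkeeping the handler performs while iterating remote controllers.
func buildRegistryConfig(identity, cluster, locality, env, namespace, appType string) IdentityConfig {
	cfg := IdentityConfig{
		IdentityName: identity,
		Clusters:     make(map[string]*IdentityConfigCluster),
	}
	cfg.Clusters[cluster] = &IdentityConfigCluster{
		Name:            cluster,
		Locality:        locality,
		IngressEndpoint: "abc-elb.us-west-2.elb.amazonaws.com.", // hypothetical ingress host
		IngressPort:     "15443",
		Environment: map[string]*IdentityConfigEnvironment{
			env: {Name: env, Namespace: namespace, Type: appType},
		},
	}
	return cfg
}

func main() {
	cfg := buildRegistryConfig("sample", "cluster1", "us-west-2", "e2e", "ns-1-usw2-e2e", "rollout")
	fmt.Printf("%+v\n", *cfg.Clusters["cluster1"].Environment["e2e"])
}
```

The real handler starts each environment as an empty IdentityConfigEnvironment (as in the hunk above) and overwrites it once the deployment or rollout branch has resolved the namespace, ports, and services.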
@@ -219,7 +243,7 @@ func modifyServiceEntryForNewServiceOrPod( } remoteRegistry.AdmiralCache.IdentityClusterCache.Put(partitionedIdentity, rc.ClusterID, rc.ClusterID) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheIdentityClusterCachePut", + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheIdentityClusterCachePut", deploymentOrRolloutName, deploymentOrRolloutNS, rc.ClusterID, "", start) if deployment != nil { @@ -240,7 +264,7 @@ func modifyServiceEntryForNewServiceOrPod( sourceServices[rc.ClusterID][common.Deployment] = serviceInstance namespace = deployment.Namespace - localMeshPorts := GetMeshPortsForDeployments(rc.ClusterID, serviceInstance, deployment) + localMeshPorts := common.GetMeshPortsForDeployments(rc.ClusterID, serviceInstance, deployment) cname = common.GetCname(deployment, common.GetWorkloadIdentifier(), common.GetHostnameSuffix()) sourceDeployments[rc.ClusterID] = deployment sourceClusters = append(sourceClusters, clusterId) @@ -257,9 +281,15 @@ func modifyServiceEntryForNewServiceOrPod( _, errCreateSE := createServiceEntryForDeployment(ctxLogger, ctx, eventType, rc, remoteRegistry.AdmiralCache, localMeshPorts, deployment, serviceEntries) ctxLogger.Infof(common.CtxLogFormat, "BuildServiceEntry", deploymentOrRolloutName, deploymentOrRolloutNS, clusterId, "total service entries built="+strconv.Itoa(len(serviceEntries))) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheCreateServiceEntryForDeployment", + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheCreateServiceEntryForDeployment", deploymentOrRolloutName, deploymentOrRolloutNS, rc.ClusterID, "", start) modifySEerr = common.AppendError(modifySEerr, errCreateSE) + + registryConfig.Clusters[clusterId].Environment[env] = ®istry.IdentityConfigEnvironment{ + Name: env, + Namespace: namespace, + Type: common.Deployment, + } } if rollout != nil { @@ -301,15 +331,20 @@ func modifyServiceEntryForNewServiceOrPod( start = time.Now() _, errCreateSE := createServiceEntryForRollout(ctxLogger, ctx, eventType, rc, remoteRegistry.AdmiralCache, localMeshPorts, rollout, serviceEntries) ctxLogger.Infof(common.CtxLogFormat, "BuildServiceEntry", deploymentOrRolloutName, deploymentOrRolloutNS, clusterId, "total service entries built="+strconv.Itoa(len(serviceEntries))) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheCreateServiceEntryForRollout", + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheCreateServiceEntryForRollout", deploymentOrRolloutName, deploymentOrRolloutNS, rc.ClusterID, "", start) modifySEerr = common.AppendError(modifySEerr, errCreateSE) + registryConfig.Clusters[clusterId].Environment[env] = ®istry.IdentityConfigEnvironment{ + Name: env, + Namespace: namespace, + Type: common.Rollout, + } } start = time.Now() remoteRegistry.AdmiralCache.CnameClusterCache.Put(cname, rc.ClusterID, rc.ClusterID) remoteRegistry.AdmiralCache.CnameIdentityCache.Store(cname, partitionedIdentity) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheCnameClusterCachePutAndCnameIdentityCacheStore", + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheCnameClusterCachePutAndCnameIdentityCacheStore", deploymentOrRolloutName, deploymentOrRolloutNS, rc.ClusterID, "", start) sourceWeightedServices[rc.ClusterID] = weightedServices @@ -369,7 +404,10 @@ func modifyServiceEntryForNewServiceOrPod( for cluster := range sourceRollouts { sourceClusters = append(sourceClusters, cluster) } - return nil, updateClusterIdentityCache(remoteRegistry, sourceClusters, sourceIdentity) + err := 
updateClusterIdentityCache(remoteRegistry, sourceClusters, sourceIdentity) + if err != nil { + return nil, err + } } //PID: use partitionedIdentity because IdentityDependencyCache is filled using the partitionedIdentity - DONE @@ -382,7 +420,7 @@ func modifyServiceEntryForNewServiceOrPod( } start = time.Now() updateCnameDependentClusterNamespaceCache(ctxLogger, remoteRegistry, dependents, deploymentOrRolloutName, deploymentOrRolloutNS, cname, sourceServices) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheCnameDependentClusterNamespaceCachePut", + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheCnameDependentClusterNamespaceCachePut", deploymentOrRolloutName, deploymentOrRolloutNS, "", "", start) dependentClusters := make(map[string]string) if remoteRegistry.AdmiralCache.CnameDependentClusterCache != nil && remoteRegistry.AdmiralCache.CnameDependentClusterCache.Get(cname) != nil { @@ -403,7 +441,7 @@ func modifyServiceEntryForNewServiceOrPod( return nil, nil } - util.LogElapsedTimeSinceForModifySE(ctxLogger, "BuildServiceEntry", + util.LogElapsedTimeSinceTask(ctxLogger, "BuildServiceEntry", deploymentOrRolloutName, deploymentOrRolloutNS, "", "", start) //cache the latest GTP in global cache to be reused during DR creation @@ -412,20 +450,24 @@ func modifyServiceEntryForNewServiceOrPod( if err != nil { modifySEerr = common.AppendError(modifySEerr, err) } - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheUpdateGlobalGtpCache", + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheUpdateGlobalGtpCache", deploymentOrRolloutName, deploymentOrRolloutNS, "", "", start) start = time.Now() - updateGlobalOutlierDetectionCache(ctxLogger, remoteRegistry.AdmiralCache, sourceIdentity, env, outlierDetections) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheUpdateGlobalOutlierDetectionCache", + updateGlobalOutlierDetectionCache( + ctxLogger, remoteRegistry.AdmiralCache, + sourceIdentity, env, outlierDetections) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheUpdateGlobalOutlierDetectionCache", deploymentOrRolloutName, deploymentOrRolloutNS, "", "", start) start = time.Now() - err = updateGlobalClientConnectionConfigCache(ctxLogger, remoteRegistry.AdmiralCache, sourceIdentity, env, clientConnectionSettings) + err = updateGlobalClientConnectionConfigCache( + ctxLogger, remoteRegistry.AdmiralCache, sourceIdentity, + env, clientConnectionSettings) if err != nil { ctxLogger.Warnf(common.CtxLogFormat, "UpdateGlobalClientConnectionConfigCache", "", "", "", err.Error()) } - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheUpdateGlobalClientConnectionConfigCache", + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheUpdateGlobalClientConnectionConfigCache", deploymentOrRolloutName, deploymentOrRolloutNS, "", "", start) //handle local updates (source clusters first) @@ -486,19 +528,57 @@ func modifyServiceEntryForNewServiceOrPod( meshPorts = GetMeshPortsForRollout(sourceCluster, serviceInstance[common.Rollout], sourceRollouts[sourceCluster]) meshDeployAndRolloutPorts[common.Rollout] = meshPorts } - meshPorts = GetMeshPortsForDeployments(sourceCluster, serviceInstance[common.Deployment], sourceDeployments[sourceCluster]) + meshPorts = common.GetMeshPortsForDeployments(sourceCluster, serviceInstance[common.Deployment], sourceDeployments[sourceCluster]) meshDeployAndRolloutPorts[common.Deployment] = meshPorts } else { meshPorts = GetMeshPortsForRollout(sourceCluster, serviceInstance[common.Rollout], sourceRollouts[sourceCluster]) 
meshDeployAndRolloutPorts[common.Rollout] = meshPorts } + registryConfig.Clusters[sourceCluster].IngressPortName = getIngressPortName(meshPorts) + + registryConfig.Clusters[sourceCluster].Environment[env].Ports = []*networking.ServicePort{getServiceEntryPort(meshPorts)} + // store admiral crd configurations for state syncer + gtp, err := remoteRegistry.AdmiralCache.GlobalTrafficCache.GetFromIdentity(sourceIdentity, env) + if err != nil { + return nil, err + } + od, err := remoteRegistry.AdmiralCache.OutlierDetectionCache.GetFromIdentity(sourceIdentity, env) + if err != nil { + return nil, err + } + ccc, err := remoteRegistry.AdmiralCache.ClientConnectionConfigCache.GetFromIdentity(sourceIdentity, env) + if err != nil { + return nil, err + } + registryConfig.Clusters[sourceCluster].Environment[env].TrafficPolicy = registry.TrafficPolicy{} + if gtp != nil { + registryConfig.Clusters[sourceCluster].Environment[env].TrafficPolicy.GlobalTrafficPolicy = *gtp + } + if od != nil { + registryConfig.Clusters[sourceCluster].Environment[env].TrafficPolicy.OutlierDetection = *od + } + if ccc != nil { + registryConfig.Clusters[sourceCluster].Environment[env].TrafficPolicy.ClientConnectionConfig = *ccc + } for key, serviceEntry := range serviceEntries { if len(serviceEntry.Endpoints) == 0 || (!deployRolloutMigration[sourceCluster] && clustersToDeleteSE[sourceCluster]) { + if common.IsAdmiralStateSyncerMode() { + registryConfig.Clusters[sourceCluster].Environment[env] = ®istry.IdentityConfigEnvironment{ + Name: env, + Namespace: namespace, + Type: common.Rollout, + Event: admiral.Delete, // TODO: we need to handle DELETE operations in admiral operator + } + continue + } ctxLogger.Infof(common.CtxLogFormat, "WriteServiceEntryToSourceClusters", deploymentOrRolloutName, deploymentOrRolloutNS, sourceCluster, "writing to cluster="+sourceCluster) - err := AddServiceEntriesWithDrToAllCluster(ctxLogger, - ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, - map[string]*networking.ServiceEntry{key: serviceEntry}, isAdditionalEndpointGenerationEnabled, isServiceEntryModifyCalledForSourceCluster, + err := remoteRegistry.ConfigWriter.AddServiceEntriesWithDrToAllCluster( + ctxLogger, ctx, remoteRegistry, + map[string]string{sourceCluster: sourceCluster}, + map[string]*networking.ServiceEntry{key: serviceEntry}, + isAdditionalEndpointGenerationEnabled, + isServiceEntryModifyCalledForSourceCluster, partitionedIdentity, env) if err != nil { ctxLogger.Errorf(common.CtxLogFormat, "Event", deploymentOrRolloutName, deploymentOrRolloutNS, sourceCluster, err.Error()) @@ -510,13 +590,26 @@ func modifyServiceEntryForNewServiceOrPod( for _, ep := range serviceEntry.Endpoints { //replace istio ingress-gateway address with local fqdn, note that ingress-gateway can be empty (not provisioned, or is not up) if ep.Address == clusterIngress || ep.Address == "" { - // Update endpoints with locafqdn for active and preview se of bluegreen rollout + // Update endpoints with localfqdn for active and preview se of bluegreen rollout if blueGreenStrategy { ctxLogger.Infof(common.CtxLogFormat, "WriteServiceEntryToSourceClusters", deploymentOrRolloutName, deploymentOrRolloutNS, sourceCluster, "Updating ServiceEntry with blue/green endpoints") oldPorts := ep.Ports - updateEndpointsForBlueGreen(sourceRollouts[sourceCluster], sourceWeightedServices[sourceCluster], cnames, ep, sourceCluster, key) - err := AddServiceEntriesWithDrToAllCluster( + blueGreenService := updateEndpointsForBlueGreen( + sourceRollouts[sourceCluster], + 
sourceWeightedServices[sourceCluster], + cnames, ep, sourceCluster, key) + if common.IsAdmiralStateSyncerMode() { + registryConfig.Clusters[sourceCluster].Environment[env].Services = map[string]*registry.RegistryServiceConfig{ + blueGreenService.Service.Name: ®istry.RegistryServiceConfig{ + Name: blueGreenService.Service.Name, + Weight: -1, + Ports: GetMeshPortsForRollout(sourceCluster, blueGreenService.Service, sourceRollouts[sourceCluster]), + }, + } + continue + } + err := remoteRegistry.ConfigWriter.AddServiceEntriesWithDrToAllCluster( ctxLogger, ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, map[string]*networking.ServiceEntry{key: serviceEntry}, isAdditionalEndpointGenerationEnabled, isServiceEntryModifyCalledForSourceCluster, partitionedIdentity, env) if err != nil { @@ -535,11 +628,21 @@ func modifyServiceEntryForNewServiceOrPod( //Nil check for canary service is done in iscanaryIstioStrategy function canaryService := sourceRollouts[sourceCluster].Spec.Strategy.Canary.CanaryService // use only canary service for fqdn + if common.IsAdmiralStateSyncerMode() { + registryConfig.Clusters[sourceCluster].Environment[env].Services = map[string]*registry.RegistryServiceConfig{ + canaryService: ®istry.RegistryServiceConfig{ + Name: canaryService, + Weight: -1, + Ports: meshPorts, + }, + } + continue + } fqdn := canaryService + common.Sep + serviceInstance[appType[sourceCluster]].Namespace + common.GetLocalDomainSuffix() ep.Address = fqdn oldPorts := ep.Ports ep.Ports = meshPorts - err := AddServiceEntriesWithDrToAllCluster( + err := remoteRegistry.ConfigWriter.AddServiceEntriesWithDrToAllCluster( ctxLogger, ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, map[string]*networking.ServiceEntry{key: serviceEntry}, isAdditionalEndpointGenerationEnabled, isServiceEntryModifyCalledForSourceCluster, partitionedIdentity, env) if err != nil { @@ -550,13 +653,16 @@ func modifyServiceEntryForNewServiceOrPod( // swap it back to use for next iteration ep.Address = clusterIngress ep.Ports = oldPorts - } else if len(sourceWeightedServices[sourceCluster]) > 1 { ctxLogger.Infof(common.CtxLogFormat, "WriteServiceEntryToSourceClusters", deploymentOrRolloutName, deploymentOrRolloutNS, sourceCluster, "Updating ServiceEntry with weighted endpoints") var se = copyServiceEntry(serviceEntry) updateEndpointsForWeightedServices(se, sourceWeightedServices[sourceCluster], clusterIngress, meshPorts) - err := AddServiceEntriesWithDrToAllCluster( + if common.IsAdmiralStateSyncerMode() { + registryConfig.Clusters[sourceCluster].Environment[env].Services = parseWeightedService(sourceWeightedServices[sourceCluster]) + continue + } + err := remoteRegistry.ConfigWriter.AddServiceEntriesWithDrToAllCluster( ctxLogger, ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, map[string]*networking.ServiceEntry{key: se}, isAdditionalEndpointGenerationEnabled, isServiceEntryModifyCalledForSourceCluster, partitionedIdentity, env) if err != nil { @@ -569,7 +675,10 @@ func modifyServiceEntryForNewServiceOrPod( deploymentOrRolloutName, deploymentOrRolloutNS, sourceCluster, "Updating ServiceEntry for Deployment to Rollout migration") var err error var se = copyServiceEntry(serviceEntry) - err = util.UpdateEndpointsForDeployToRolloutMigration(serviceInstance, se, meshDeployAndRolloutPorts, clusterIngress, clusterAppDeleteMap, sourceCluster, clusterDeployRolloutPresent) + migrationService, err := util.UpdateEndpointsForDeployToRolloutMigration( + serviceInstance, se, 
meshDeployAndRolloutPorts, + clusterIngress, clusterAppDeleteMap, sourceCluster, + clusterDeployRolloutPresent) // If the previous function returned an error that means the endpoints were not updated // we should retry updating the endpoints and not apply the non modified SE to the cluster if err != nil { @@ -578,7 +687,11 @@ func modifyServiceEntryForNewServiceOrPod( modifySEerr = common.AppendError(modifySEerr, err) break } - err = AddServiceEntriesWithDrToAllCluster( + if common.IsAdmiralStateSyncerMode() { + registryConfig.Clusters[sourceCluster].Environment[env].Services = parseMigrationService(migrationService) + continue + } + err = remoteRegistry.ConfigWriter.AddServiceEntriesWithDrToAllCluster( ctxLogger, ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, map[string]*networking.ServiceEntry{key: se}, isAdditionalEndpointGenerationEnabled, isServiceEntryModifyCalledForSourceCluster, partitionedIdentity, env) if err != nil { @@ -589,10 +702,21 @@ func modifyServiceEntryForNewServiceOrPod( } else { ctxLogger.Infof(common.CtxLogFormat, "WriteServiceEntryToSourceClusters", deploymentOrRolloutName, deploymentOrRolloutNS, sourceCluster, "Updating ServiceEntry regular endpoints") + // call State Syncer's config syncer for deployment + if common.IsAdmiralStateSyncerMode() { + registryConfig.Clusters[sourceCluster].Environment[env].Services = map[string]*registry.RegistryServiceConfig{ + localFqdn: ®istry.RegistryServiceConfig{ + Name: localFqdn, + Weight: 0, + Ports: meshPorts, + }, + } + continue + } ep.Address = localFqdn oldPorts := ep.Ports ep.Ports = meshPorts - err := AddServiceEntriesWithDrToAllCluster( + err := remoteRegistry.ConfigWriter.AddServiceEntriesWithDrToAllCluster( ctxLogger, ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, map[string]*networking.ServiceEntry{key: serviceEntry}, isAdditionalEndpointGenerationEnabled, isServiceEntryModifyCalledForSourceCluster, partitionedIdentity, env) if err != nil { @@ -608,6 +732,11 @@ func modifyServiceEntryForNewServiceOrPod( } } + if common.IsAdmiralStateSyncerMode() { + registryConfig.ClientAssets = dependents + continue + } + start = time.Now() if common.GetWorkloadSidecarUpdate() == "enabled" { err := modifySidecarForLocalClusterCommunication( @@ -625,9 +754,20 @@ func modifyServiceEntryForNewServiceOrPod( } } - util.LogElapsedTimeSinceForModifySE(ctxLogger, "WriteServiceEntryToSourceClusters", + ctxLogger.Infof(common.CtxLogFormat, "ClientAssets", + deploymentOrRolloutName, deploymentOrRolloutNS, "", fmt.Sprintf("asset list=%v dependents=%v", registryConfig.ClientAssets, dependents)) + util.LogElapsedTimeSinceTask(ctxLogger, "WriteServiceEntryToSourceClusters", deploymentOrRolloutName, deploymentOrRolloutNS, sourceIdentity, "", start) + ctxLogger.Infof(common.CtxLogFormat, "updateRegistryConfigForClusterPerEnvironment", deploymentOrRolloutName, deploymentOrRolloutNS, "", "") + if common.IsAdmiralStateSyncerMode() { + err = updateRegistryConfigForClusterPerEnvironment(ctxLogger, remoteRegistry, registryConfig) + if err != nil { + return nil, err + } + ctxLogger.Infof(common.CtxLogFormat, "updateRegistryConfigForClusterPerEnvironment", deploymentOrRolloutName, deploymentOrRolloutNS, "", "done writing") + return nil, nil + } //Write to dependent clusters start = time.Now() isServiceEntryModifyCalledForSourceCluster = false @@ -642,13 +782,13 @@ func modifyServiceEntryForNewServiceOrPod( ctxLogger.Infof(common.CtxLogFormat, "WriteServiceEntryToDependentClusters", deploymentOrRolloutName, 
deploymentOrRolloutNS, "", fmt.Sprintf("Using override values of dependent clusters: %v, count: %v", clusters, len(clusters))) dependentClusters = clusters } - err = AddServiceEntriesWithDrToAllCluster(ctxLogger, ctx, remoteRegistry, dependentClusters, serviceEntries, isAdditionalEndpointGenerationEnabled, isServiceEntryModifyCalledForSourceCluster, partitionedIdentity, env) + err = remoteRegistry.ConfigWriter.AddServiceEntriesWithDrToAllCluster(ctxLogger, ctx, remoteRegistry, dependentClusters, serviceEntries, isAdditionalEndpointGenerationEnabled, isServiceEntryModifyCalledForSourceCluster, partitionedIdentity, env) if err != nil { ctxLogger.Errorf(common.CtxLogFormat, "Event", deploymentOrRolloutName, deploymentOrRolloutNS, "", err.Error()) modifySEerr = common.AppendError(modifySEerr, err) } - util.LogElapsedTimeSinceForModifySE(ctxLogger, "WriteServiceEntryToDependentClusters", + util.LogElapsedTimeSinceTask(ctxLogger, "WriteServiceEntryToDependentClusters", deploymentOrRolloutName, deploymentOrRolloutNS, "", "", start) return serviceEntries, modifySEerr @@ -813,7 +953,7 @@ func updateGlobalOutlierDetectionCache(ctxLogger *logrus.Entry, cache *AdmiralCa // i) Picks the GTP that was created most recently from the passed in GTP list based on GTP priority label (GTPs from all clusters) // ii) Updates the global GTP cache with the selected GTP in i) func updateGlobalGtpCache(remoteRegistry *RemoteRegistry, identity, env string, gtps map[string][]*v1.GlobalTrafficPolicy, clusterName string, ctxLogger *logrus.Entry) error { - defer util.LogElapsedTimeForModifySE(ctxLogger, "updateGlobalGtpCache", "", "", "", "")() + defer util.LogElapsedTimeForTask(ctxLogger, "updateGlobalGtpCache", "", "", "", "")() gtpsOrdered := make([]*v1.GlobalTrafficPolicy, 0) for _, gtpsInCluster := range gtps { gtpsOrdered = append(gtpsOrdered, gtpsInCluster...) 
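
The recurring change in the hunks above is the seam this patch introduces: every direct call to AddServiceEntriesWithDrToAllCluster is rerouted through remoteRegistry.ConfigWriter, the interface defined further down in serviceentry.go. The point of the indirection is substitutability, so the new state-syncer tests can inject a double. A minimal, self-contained sketch of the pattern; the `writer`/`noopWriter` names and the trimmed parameter list are illustrative only, not from this patch:

```go
package main

import "fmt"

// writer mirrors the shape of the ConfigWriter interface added in this patch,
// with the parameter list trimmed down for illustration.
type writer interface {
	AddServiceEntriesWithDrToAllCluster(sourceClusters map[string]string) error
}

// noopWriter is a stand-in test double that only counts invocations.
type noopWriter struct{ calls int }

func (w *noopWriter) AddServiceEntriesWithDrToAllCluster(sourceClusters map[string]string) error {
	w.calls++
	return nil
}

func main() {
	// Production code holds the interface; tests inject the double.
	var w writer = &noopWriter{}
	_ = w.AddServiceEntriesWithDrToAllCluster(map[string]string{"cluster1": "cluster1"})
	fmt.Println("writer invoked:", w.(*noopWriter).calls) // writer invoked: 1
}
```

In state syncer mode the code above `continue`s before ever reaching the writer, which is what the new test in serviceentry_test.go checks with AssertNotCalled on a ConfigWriter mock.
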
@@ -870,7 +1010,7 @@ func sortClientConnectionConfigByCreationTime(ods []*v1.ClientConnectionConfig, } func updateEndpointsForBlueGreen(rollout *argo.Rollout, weightedServices map[string]*WeightedService, cnames map[string]string, - ep *networking.WorkloadEntry, sourceCluster string, meshHost string) { + ep *networking.WorkloadEntry, sourceCluster string, meshHost string) *WeightedService { activeServiceName := rollout.Spec.Strategy.BlueGreen.ActiveService previewServiceName := rollout.Spec.Strategy.BlueGreen.PreviewService @@ -880,17 +1020,22 @@ func updateEndpointsForBlueGreen(rollout *argo.Rollout, weightedServices map[str cnames[localFqdn] = "1" ep.Address = localFqdn ep.Ports = GetMeshPortsForRollout(sourceCluster, previewServiceInstance, rollout) + return previewService } else if activeService, ok := weightedServices[activeServiceName]; ok { activeServiceInstance := activeService.Service localFqdn := activeServiceInstance.Name + common.Sep + activeServiceInstance.Namespace + common.GetLocalDomainSuffix() cnames[localFqdn] = "1" ep.Address = localFqdn ep.Ports = GetMeshPortsForRollout(sourceCluster, activeServiceInstance, rollout) + return activeService } + return nil } // update endpoints for Argo rollouts specific Service Entries to account for traffic splitting (Canary strategy) -func updateEndpointsForWeightedServices(serviceEntry *networking.ServiceEntry, weightedServices map[string]*WeightedService, clusterIngress string, meshPorts map[string]uint32) { +func updateEndpointsForWeightedServices( + serviceEntry *networking.ServiceEntry, weightedServices map[string]*WeightedService, + clusterIngress string, meshPorts map[string]uint32) { var endpoints = make([]*networking.WorkloadEntry, 0) var endpointToReplace *networking.WorkloadEntry @@ -1001,6 +1146,25 @@ func copySidecar(sidecar *v1alpha3.Sidecar) *v1alpha3.Sidecar { return newSidecarObj } +type ConfigWriter interface { + AddServiceEntriesWithDrToAllCluster(ctxLogger *logrus.Entry, ctx context.Context, rr *RemoteRegistry, sourceClusters map[string]string, + serviceEntries map[string]*networking.ServiceEntry, isAdditionalEndpointsEnabled bool, isServiceEntryModifyCalledForSourceCluster bool, + identityId, env string) error +} + +func NewConfigWriter() ConfigWriter { + return &configWrite{} +} + +type configWrite struct{} + +func (w *configWrite) AddServiceEntriesWithDrToAllCluster(ctxLogger *logrus.Entry, ctx context.Context, rr *RemoteRegistry, sourceClusters map[string]string, + serviceEntries map[string]*networking.ServiceEntry, isAdditionalEndpointsEnabled bool, isServiceEntryModifyCalledForSourceCluster bool, + identityId, env string) error { + return AddServiceEntriesWithDrToAllCluster(ctxLogger, ctx, rr, sourceClusters, + serviceEntries, isAdditionalEndpointsEnabled, isServiceEntryModifyCalledForSourceCluster, identityId, env) +} + // AddServiceEntriesWithDrToAllCluster will create the default service entries and also additional ones specified in GTP func AddServiceEntriesWithDrToAllCluster(ctxLogger *logrus.Entry, ctx context.Context, rr *RemoteRegistry, sourceClusters map[string]string, serviceEntries map[string]*networking.ServiceEntry, isAdditionalEndpointsEnabled bool, isServiceEntryModifyCalledForSourceCluster bool, @@ -1122,7 +1286,7 @@ func AddServiceEntriesWithDrWorker( ctxLogger.Infof("currentDR set for dr=%v cluster=%v", getIstioResourceName(se.Hosts[0], "-default-dr"), cluster) var seDrSet = createSeAndDrSetFromGtp(ctxLogger, ctx, env, region, cluster, se, globalTrafficPolicy, outlierDetection, 
clientConnectionSettings, cache, currentDR) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheCreateSeAndDrSetFromGtp", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheCreateSeAndDrSetFromGtp", "", "", cluster, "", start) for _, seDr := range seDrSet { var ( @@ -1161,7 +1325,7 @@ func AddServiceEntriesWithDrWorker( seDr.DestinationRule.DeepCopy(), seDr.DrName, cluster) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "ReconcileDestinationRule", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "ReconcileDestinationRule", "", "", cluster, "", start) if drReconciliationRequired { oldDestinationRule, err = rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Get(ctx, seDr.DrName, v12.GetOptions{}) if err != nil { @@ -1197,13 +1361,13 @@ func AddServiceEntriesWithDrWorker( if !skipSEUpdate { start = time.Now() err := deleteServiceEntry(ctx, oldServiceEntry, syncNamespace, rc) // [TODO] (needs fix): what happens if it was not able to get the old service entry even though it existed - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheDeleteServiceEntry", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheDeleteServiceEntry", "", "", cluster, "", start) addSEorDRToAClusterError = common.AppendError(addSEorDRToAClusterError, err) if isServiceEntryModifyCalledForSourceCluster { start = time.Now() err = deleteWorkloadData(cluster, env, oldServiceEntry, rr, ctxLogger) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheDeleteWorkloadData", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheDeleteWorkloadData", "", "", cluster, "", start) if err != nil { addSEorDRToAClusterError = common.AppendError(addSEorDRToAClusterError, err) ctxLogger.Errorf(LogErrFormat, "Delete", "dynamoDbWorkloadData", env+"."+identityId, cluster, err.Error()) @@ -1214,7 +1378,7 @@ func AddServiceEntriesWithDrWorker( start = time.Now() cache.SeClusterCache.Delete(seDr.ServiceEntry.Hosts[0]) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheSeClusterCache Delete", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheSeClusterCache Delete", "", "", cluster, "", start) // Delete additional endpoints if any if isAdditionalEndpointsEnabled { @@ -1223,7 +1387,7 @@ func AddServiceEntriesWithDrWorker( // if env contains -air suffix remove it else return original string trimmedAirEnv := strings.TrimSuffix(env, common.AIREnvSuffix) err = deleteAdditionalEndpoints(ctxLogger, ctx, rc, identityId, trimmedAirEnv, syncNamespace, vsDNSPrefix) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheDeleteWorkloadData", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheDeleteWorkloadData", "", "", cluster, "", start) if err != nil { ctxLogger.Errorf(LogErrFormat, "Delete", "VirtualService", trimmedAirEnv+"."+identityId, cluster, err.Error()) addSEorDRToAClusterError = common.AppendError(addSEorDRToAClusterError, err) @@ -1236,7 +1400,7 @@ func AddServiceEntriesWithDrWorker( start = time.Now() // after deleting the service entry, destination rule also need to be deleted if the service entry host no longer exists err = deleteDestinationRule(ctx, oldDestinationRule, syncNamespace, rc) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheDeleteDestinationRule", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheDeleteDestinationRule", "", 
"", cluster, "", start) addSEorDRToAClusterError = common.AppendError(addSEorDRToAClusterError, err) } } else { @@ -1271,17 +1435,17 @@ func AddServiceEntriesWithDrWorker( cluster, compareAnnotations, compareLabels) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "ReconcileServiceEntry", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "ReconcileServiceEntry", "", "", cluster, "", start) if seReconciliationRequired { err = addUpdateServiceEntry(ctxLogger, ctx, newServiceEntry, oldServiceEntry, syncNamespace, rc) addSEorDRToAClusterError = common.AppendError(addSEorDRToAClusterError, err) } - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheAddUpdateServiceEntry", "", "", cluster, "", start) // TODO: log service entry name + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheAddUpdateServiceEntry", "", "", cluster, "", start) // TODO: log service entry name start = time.Now() cache.SeClusterCache.Put(newServiceEntry.Spec.Hosts[0], rc.ClusterID, rc.ClusterID) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheSeClusterCachePut", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheSeClusterCachePut", "", "", cluster, "", start) // Create additional endpoints if necessary if isAdditionalEndpointsEnabled { ctxLogger.Infof("gatewayAliases=%v", common.GetGatewayAssetAliases()) @@ -1319,7 +1483,7 @@ func AddServiceEntriesWithDrWorker( additionalEndpoints, partitionedIdentity, trimmedAirEnv, newServiceEntry.Spec.Hosts[0], syncNamespace, vsDNSPrefix, gwClusters, env) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheCreateAdditionalEndpoints", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheCreateAdditionalEndpoints", "", "", cluster, "", start) if err != nil { ctxLogger.Errorf(LogErrFormat, "Create", "VirtualService", trimmedAirEnv+"."+identityId, cluster, err.Error()) addSEorDRToAClusterError = common.AppendError(addSEorDRToAClusterError, err) @@ -1333,7 +1497,7 @@ func AddServiceEntriesWithDrWorker( if isServiceEntryModifyCalledForSourceCluster { start = time.Now() err = storeWorkloadData(cluster, newServiceEntry, globalTrafficPolicy, additionalEndpoints, rr, ctxLogger, *seDr.DestinationRule, true) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheStoreWorkloadData", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheStoreWorkloadData", "", "", cluster, "", start) if err != nil { addSEorDRToAClusterError = common.AppendError(addSEorDRToAClusterError, err) ctxLogger.Errorf(LogErrFormat, "Create", "dynamoDbWorkloadData", env+"."+identityId, cluster, err.Error()) @@ -1349,7 +1513,7 @@ func AddServiceEntriesWithDrWorker( // if event was deletion when this function was called, then GlobalTrafficCache should already deleted the cache globalTrafficPolicy is an empty shell object start = time.Now() err = addUpdateDestinationRule(ctxLogger, ctx, newDestinationRule, oldDestinationRule, syncNamespace, rc, rr) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheAddUpdateDestinationRule", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheAddUpdateDestinationRule", "", "", cluster, "", start) addSEorDRToAClusterError = common.AppendError(addSEorDRToAClusterError, err) isSuccess := err == nil @@ -1359,7 +1523,7 @@ func AddServiceEntriesWithDrWorker( if globalTrafficPolicy != nil { start = time.Now() err = storeWorkloadData(cluster, newServiceEntry, globalTrafficPolicy, additionalEndpoints, rr, ctxLogger, 
*seDr.DestinationRule, isSuccess) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheStoreWorkloadData", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheStoreWorkloadData", "", "", cluster, "", start) if err != nil { addSEorDRToAClusterError = common.AppendError(addSEorDRToAClusterError, err) ctxLogger.Errorf(LogErrFormat, "Update", "dynamoDbWorkloadData", env+"."+identityId, cluster, err.Error()) @@ -1464,7 +1628,7 @@ func handleDynamoDbUpdateForOldGtp(oldGtp *v1.GlobalTrafficPolicy, remoteRegistr return fmt.Errorf("dynamodb client for workload data table is not initialized") } - defer util.LogElapsedTimeForModifySE(ctxLogger, "handleDynamoDbUpdateForOldGtp", oldGtp.Name, oldGtp.Namespace, clusterName, "")() + defer util.LogElapsedTimeForTask(ctxLogger, "handleDynamoDbUpdateForOldGtp", oldGtp.Name, oldGtp.Namespace, clusterName, "")() workloadData, err := remoteRegistry.AdmiralDatabaseClient.Get(env, identity) @@ -1501,7 +1665,7 @@ func pushWorkloadDataToDynamodbTable(workloadDataToUpdate WorkloadData, endpoint if !verifyIfEndpointRecordNeedsUpdate(ctxLogger, remoteRegistry.AdmiralCache, endpoint, newWorkloadDataShasum) { return nil } - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheVerifyIfEndpointRecordNeedsUpdate", endpoint, "", clusterName, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheVerifyIfEndpointRecordNeedsUpdate", endpoint, "", clusterName, "", start) //call put operation on dynamoDB workloadData table in case this is new record or has diffs compared to existing record start = time.Now() @@ -1509,11 +1673,11 @@ func pushWorkloadDataToDynamodbTable(workloadDataToUpdate WorkloadData, endpoint if err != nil { return err } - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheAdmiralDatabaseClientUpdate", endpoint, "", clusterName, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheAdmiralDatabaseClientUpdate", endpoint, "", clusterName, "", start) start = time.Now() remoteRegistry.AdmiralCache.DynamoDbEndpointUpdateCache.Store(endpoint, fmt.Sprint(newWorkloadDataShasum)) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "AdmiralCacheDynamoDbEndpointUpdateCacheStore", endpoint, "", clusterName, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "AdmiralCacheDynamoDbEndpointUpdateCacheStore", endpoint, "", clusterName, "", start) return nil } @@ -1547,7 +1711,7 @@ func storeWorkloadData(clusterName string, serviceEntry *v1alpha3.ServiceEntry, workloadData := getWorkloadData(ctxLogger, serviceEntry, globalTrafficPolicy, additionalEndpoints, dr, clusterName, isSuccess) err := pushWorkloadDataToDynamodbTable(workloadData, serviceEntry.Spec.Hosts[0], clusterName, rr, ctxLogger) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "UpdateEndpointRecord", serviceEntry.Spec.Hosts[0], "", clusterName, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "UpdateEndpointRecord", serviceEntry.Spec.Hosts[0], "", clusterName, "", start) if err != nil { return err } @@ -2251,7 +2415,7 @@ func getSanForRollout(destRollout *argo.Rollout, workloadIdentityKey string) (sa func getUniqueAddress(ctxLogger *logrus.Entry, ctx context.Context, admiralCache *AdmiralCache, globalFqdn string) (string, error) { start := time.Now() - defer util.LogElapsedTimeSinceForModifySE(ctxLogger, "GetUniqueAddress", + defer util.LogElapsedTimeSinceTask(ctxLogger, "GetUniqueAddress", "", "", "", "", start) //initializations var err error = nil @@ -2291,14 +2455,22 @@ func getUniqueAddress(ctxLogger *logrus.Entry, ctx 
context.Context, admiralCache return address, nil } -func generateServiceEntry(ctxLogger *logrus.Entry, event admiral.EventType, admiralCache *AdmiralCache, meshPorts map[string]uint32, - globalFqdn string, rc *RemoteController, serviceEntries map[string]*networking.ServiceEntry, - address string, san []string, appType string) *networking.ServiceEntry { +func generateServiceEntry( + ctxLogger *logrus.Entry, + event admiral.EventType, + admiralCache *AdmiralCache, + meshPorts map[string]uint32, + globalFqdn string, + rc *RemoteController, + serviceEntries map[string]*networking.ServiceEntry, + address string, + san []string, + appType string) *networking.ServiceEntry { start := time.Now() - defer util.LogElapsedTimeSinceForModifySE(ctxLogger, "GenerateServiceEntry", "", "", rc.ClusterID, "", start) + defer util.LogElapsedTimeSinceTask(ctxLogger, "GenerateServiceEntry", "", "", rc.ClusterID, "", start) admiralCache.CnameClusterCache.Put(globalFqdn, rc.ClusterID, rc.ClusterID) start = time.Now() - defer util.LogElapsedTimeSinceForModifySE(ctxLogger, "GenerateServiceEntry", "", "", rc.ClusterID, "", start) + defer util.LogElapsedTimeSinceTask(ctxLogger, "GenerateServiceEntry", "", "", rc.ClusterID, "", start) tmpSe := serviceEntries[globalFqdn] var finalProtocol = commonUtil.Http @@ -2329,7 +2501,7 @@ func generateServiceEntry(ctxLogger *logrus.Entry, event admiral.EventType, admi start = time.Now() endpointAddress, port := rc.ServiceController.Cache. GetLoadBalancer(common.GetAdmiralParams().LabelSet.GatewayApp, common.NamespaceIstioSystem) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "GetLoadBalancer", "", "", rc.ClusterID, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "GetLoadBalancer", "", "", rc.ClusterID, "", start) var locality string if rc.NodeController.Locality != nil { locality = rc.NodeController.Locality.Region @@ -2398,7 +2570,7 @@ func reconcileServiceEntry( ctxLogger.Infof(common.CtxLogFormat, "ReconcileServiceEntry", seName, "", cluster, "Checking if ServiceEntry requires reconciliation") start := time.Now() currentSE := rc.ServiceEntryController.Cache.Get(seName, cluster) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "ReconcileServiceEntry=Get", seName, "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "ReconcileServiceEntry=Get", seName, "", cluster, "", start) if currentSE != nil { // compare annotations result := compareMapsOnKeys(annotationsKeyToCompare, desiredSE.Annotations, currentSE.Annotations) @@ -2423,7 +2595,7 @@ func reconcileServiceEntry( result = reflect.DeepEqual(desiredSESpec, currentSESpec) // compare annotations and labels ctxLogger.Infof(common.CtxLogFormat, "ReconcileServiceEntry", seName, "", cluster, "reconcile="+strconv.FormatBool(!result)) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "ReconcileServiceEntry=DeepEqual", seName, "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "ReconcileServiceEntry=DeepEqual", seName, "", cluster, "", start) return !result } ctxLogger.Infof(common.CtxLogFormat, "ReconcileServiceEntry", seName, "", cluster, "reconcile=true") @@ -2453,14 +2625,14 @@ func reconcileDestinationRule( ctxLogger.Infof(common.CtxLogFormat, "ReconcileDestinationRule", drName, "", cluster, "Checking if DestinationRule requires reconciliation") start := time.Now() currentDR := rc.DestinationRuleController.Cache.Get(drName, common.GetSyncNamespace()) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "ReconcileDestinationRule=Get", drName, "", cluster, "", start) + 
util.LogElapsedTimeSinceTask(ctxLogger, "ReconcileDestinationRule=Get", drName, "", cluster, "", start) if currentDR != nil { drSpec := dr.DeepCopy() currentDRSpec := currentDR.Spec.DeepCopy() start = time.Now() result := reflect.DeepEqual(drSpec, currentDRSpec) ctxLogger.Infof(common.CtxLogFormat, "ReconcileDestinationRule", drName, "", cluster, "reconcile="+strconv.FormatBool(!result)) - util.LogElapsedTimeSinceForModifySE(ctxLogger, "ReconcileDestinationRule=DeepEqual", "", "", cluster, "", start) + util.LogElapsedTimeSinceTask(ctxLogger, "ReconcileDestinationRule=DeepEqual", "", "", cluster, "", start) return !result } return true @@ -2576,3 +2748,12 @@ func updateCnameDependentClusterNamespaceCache( ctxLogger.Infof(common.CtxLogFormat, "DependentClusters", deploymentOrRolloutName, deploymentOrRolloutNS, "", "total dependent clusters="+strconv.Itoa(dependentClusterCounter)) } + +func getServiceEntryPort(meshPorts map[string]uint32) *networking.ServicePort { + var sePorts *networking.ServicePort + for protocol := range meshPorts { + sePorts = &networking.ServicePort{Number: uint32(common.DefaultServiceEntryPort), + Name: protocol, Protocol: protocol} + } + return sePorts +} diff --git a/admiral/pkg/clusters/serviceentry_test.go b/admiral/pkg/clusters/serviceentry_test.go index 775de37c..3795a881 100644 --- a/admiral/pkg/clusters/serviceentry_test.go +++ b/admiral/pkg/clusters/serviceentry_test.go @@ -1,9 +1,14 @@ package clusters import ( + "bytes" "context" "errors" "fmt" + "github.com/istio-ecosystem/admiral/admiral/pkg/registry" + registryMocks "github.com/istio-ecosystem/admiral/admiral/pkg/registry/mocks" + "github.com/stretchr/testify/mock" + "os" "reflect" "strconv" "strings" @@ -73,14 +78,6 @@ func admiralParamsForServiceEntryTests() common.AdmiralParams { } } -func cartographerParamsForSETests() common.AdmiralParams { - params := admiralParamsForServiceEntryTests() - params.TrafficConfigPersona = true - params.AdditionalEndpointSuffixes = []string{"intuit"} - params.AdditionalEndpointLabelFilters = []string{"express"} - return params -} - var serviceEntryTestSingleton sync.Once func setupForServiceEntryTests() { @@ -294,13 +291,11 @@ func TestModifyServiceEntryForNewServiceOrPodForServiceEntryUpdateSuspension(t * gtpc, err := admiral.NewGlobalTrafficController(make(chan struct{}), &test.MockGlobalTrafficHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader()) if err != nil { t.Fatalf("%v", err) - t.FailNow() } od, err := admiral.NewOutlierDetectionController(make(chan struct{}), &test.MockOutlierDetectionHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader()) if err != nil { t.Fatalf("%v", err) - t.FailNow() } t.Logf("expectedServiceEntriesForDeployment: %v\n", expectedServiceEntriesForDeployment) @@ -369,7 +364,7 @@ func TestModifyServiceEntryForNewServiceOrPodForServiceEntryUpdateSuspension(t * }, { name: "Given asset is using a deployment, " + - "And asset is NOT in the exclude list" + + "And asset is NOT in the exclude list, " + "When modifyServiceEntryForNewServiceOrPod is called, " + "Then, corresponding service entry should be created, " + "And the function should return a map containing the created service entry", @@ -379,7 +374,7 @@ func TestModifyServiceEntryForNewServiceOrPodForServiceEntryUpdateSuspension(t * }, { name: "Given asset is using a deployment, " + - "And asset is NOT in the exclude list and admial database client is not initialized" + + "And asset is NOT in the exclude list and admiral database client is not initialized" + "When 
modifyServiceEntryForNewServiceOrPod is called, " + "Then, corresponding service entry should be created, " + "And the function should return a map containing the created service entry", @@ -6772,104 +6767,6 @@ func TestGetDNSPrefixFromServiceEntry(t *testing.T) { }) } } - -func TestAdditionalEndpointsCacheCartographer(t *testing.T) { - - common.ResetSync() - common.InitializeConfig(cartographerParamsForSETests()) - istioFakeController := istiofake.NewSimpleClientset() - config := rest.Config{Host: "localhost"} - testRollout1 := makeTestRollout("test", "test", "identity") - testRollout2 := makeTestRollout("test", "test", "identity") - testRollout2.ObjectMeta.Labels["express"] = "true" - rolloutController, err := admiral.NewRolloutsController(make(chan struct{}), &test.MockRolloutHandler{}, &config, 0, loader.GetFakeClientLoader()) - if err != nil { - t.Fail() - } - - rr1 := NewRemoteRegistry(nil, cartographerParamsForSETests()) - rc := &RemoteController{ - ClusterID: "new", - RolloutController: rolloutController, - VirtualServiceController: &istio.VirtualServiceController{ - IstioClient: istioFakeController, - }, - NodeController: &admiral.NodeController{ - Locality: &admiral.Locality{ - Region: "us-west-2", - }, - }, - ServiceEntryController: &istio.ServiceEntryController{ - IstioClient: istioFakeController, - Cache: istio.NewServiceEntryCache(), - }, - DestinationRuleController: &istio.DestinationRuleController{ - IstioClient: istioFakeController, - Cache: istio.NewDestinationRuleCache(), - }, - GlobalTraffic: nil, - } - rr1.PutRemoteController("new", rc) - - testCases := []struct { - name string - assetIdentity string - remoteRegistry *RemoteRegistry - rolloutController *admiral.RolloutController - rollout *argo.Rollout - expectedCache string - }{ - { - name: "Given asset is using a deployment and trafficPersona is enabled," + - "If the app is not express," + - "Then, it does not populate the additionalEndpointsCache", - assetIdentity: "identity", - remoteRegistry: rr1, - rolloutController: rolloutController, - rollout: &testRollout1, - expectedCache: "", - }, - { - name: "Given asset is using a deployment and trafficPersona is enabled," + - "If the app is express," + - "Then, it populates the additionalEndpointsCache", - assetIdentity: "identity", - remoteRegistry: rr1, - rolloutController: rolloutController, - rollout: &testRollout2, - expectedCache: "identity", - }, - } - - for _, c := range testCases { - t.Run(c.name, func(t *testing.T) { - - ctx := context.Background() - ctx = context.WithValue(ctx, "clusterName", "new") - ctx = context.WithValue(ctx, "eventResourceType", common.Deployment) - - c.rolloutController.Cache.UpdateRolloutToClusterCache("identity", c.rollout) - _, err := modifyServiceEntryForNewServiceOrPod( - ctx, - admiral.Add, - "test", - c.assetIdentity, - c.remoteRegistry, - ) - - if err != nil { - t.Errorf("Unexpected error %s", err.Error()) - } else if len(c.expectedCache) > 0 { - _, ok := c.remoteRegistry.AdmiralCache.IdentitiesWithAdditionalEndpoints.Load(c.expectedCache) - if !ok { - t.Errorf("Unexpected identity in cache %s not found", c.expectedCache) - } - } - - }) - } -} - func TestReconcileServiceEntry(t *testing.T) { var ctxLogger = logrus.WithFields(logrus.Fields{ "type": "modifySE", @@ -9933,3 +9830,388 @@ func TestPartitionAwarenessExportToMultipleRemote(t *testing.T) { }) } } + +func TestStateSyncerConfiguration(t *testing.T) { + var ( + env = "test" + stop = make(chan struct{}) + foobarMetadataName = "foobar" + foobarMetadataNamespace = "foobar-ns" + 
identity = "identity" + testRollout1 = makeTestRollout(foobarMetadataName, foobarMetadataNamespace, identity) + testDeployment1 = makeTestDeployment(foobarMetadataName, foobarMetadataNamespace, identity) + clusterID = "test-dev-k8s" + clusterDependentID = "test-dev-dependent-k8s" + fakeIstioClient = istiofake.NewSimpleClientset() + config = rest.Config{Host: "localhost"} + reSyncPeriod = time.Millisecond * 1 + serviceEntryAddressStore = &ServiceEntryAddressStore{ + EntryAddresses: map[string]string{ + "test." + identity + ".mesh-se": "127.0.0.1", + }, + Addresses: []string{}, + } + serviceForRollout = &coreV1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: foobarMetadataName + "-stable", + Namespace: foobarMetadataNamespace, + CreationTimestamp: metav1.NewTime(time.Now()), + }, + Spec: coreV1.ServiceSpec{ + Selector: map[string]string{"app": identity}, + Ports: []coreV1.ServicePort{ + { + Name: "http", + Port: 8090, + }, + }, + }, + } + serviceForDeployment = &coreV1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: foobarMetadataName, + Namespace: foobarMetadataNamespace, + CreationTimestamp: metav1.NewTime(time.Now().AddDate(-1, 1, 1)), + }, + Spec: coreV1.ServiceSpec{ + Selector: map[string]string{"app": identity}, + Ports: []coreV1.ServicePort{ + { + Name: "http", + Port: 8090, + }, + }, + }, + } + serviceForIngress = &coreV1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "east.aws.lb", + Namespace: "istio-system", + Labels: map[string]string{"app": "gatewayapp"}, + }, + Spec: coreV1.ServiceSpec{ + Ports: []coreV1.ServicePort{ + { + Name: "http", + Port: 8090, + }, + }, + }, + Status: coreV1.ServiceStatus{ + LoadBalancer: coreV1.LoadBalancerStatus{ + Ingress: []coreV1.LoadBalancerIngress{ + { + Hostname: "east.aws.lb", + }, + }, + }, + }, + } + remoteRegistryWithSyncError, _ = InitAdmiral(context.Background(), common.AdmiralParams{ + KubeconfigPath: "testdata/fake.config", + LabelSet: &common.LabelSet{ + GatewayApp: "gatewayapp", + WorkloadIdentityKey: "identity", + PriorityKey: "priority", + EnvKey: "env", + AdmiralCRDIdentityLabel: "identity", + }, + EnableSAN: true, + SANPrefix: "prefix", + HostnameSuffix: "mesh", + SyncNamespace: "ns", + CacheReconcileDuration: 0, + ClusterRegistriesNamespace: "default", + DependenciesNamespace: "default", + WorkloadSidecarName: "default", + Profile: common.AdmiralProfileDefault, + DependentClusterWorkerConcurrency: 5, + }) + remoteRegistry, _ = InitAdmiral(context.Background(), common.AdmiralParams{ + KubeconfigPath: "testdata/fake.config", + LabelSet: &common.LabelSet{ + GatewayApp: "gatewayapp", + WorkloadIdentityKey: "identity", + PriorityKey: "priority", + EnvKey: "env", + AdmiralCRDIdentityLabel: "identity", + }, + EnableSAN: true, + SANPrefix: "prefix", + HostnameSuffix: "mesh", + SyncNamespace: "ns", + CacheReconcileDuration: 0, + ClusterRegistriesNamespace: "default", + DependenciesNamespace: "default", + WorkloadSidecarName: "default", + Profile: common.AdmiralProfileDefault, + DependentClusterWorkerConcurrency: 5, + }) + remoteRegistryWithoutMockedSyncer, _ = InitAdmiral(context.Background(), common.AdmiralParams{ + KubeconfigPath: "testdata/fake.config", + LabelSet: &common.LabelSet{ + GatewayApp: "gatewayapp", + WorkloadIdentityKey: "identity", + PriorityKey: "priority", + EnvKey: "env", + AdmiralCRDIdentityLabel: "identity", + }, + EnableSAN: true, + SANPrefix: "prefix", + HostnameSuffix: "mesh", + SyncNamespace: "ns", + CacheReconcileDuration: 0, + ClusterRegistriesNamespace: "default", + DependenciesNamespace: "default", + 
WorkloadSidecarName: "default", + Profile: common.AdmiralProfileDefault, + DependentClusterWorkerConcurrency: 5, + }) + ) + + deploymentController, err := admiral.NewDeploymentController(make(chan struct{}), &test.MockDeploymentHandler{}, &config, reSyncPeriod, loader.GetFakeClientLoader()) + if err != nil { + t.Fail() + } + deploymentController.Cache.UpdateDeploymentToClusterCache(identity, testDeployment1) + + deploymentDependentController, err := admiral.NewDeploymentController(make(chan struct{}), &test.MockDeploymentHandler{}, &config, reSyncPeriod, loader.GetFakeClientLoader()) + if err != nil { + t.Fail() + } + + rolloutController, err := admiral.NewRolloutsController(make(chan struct{}), &test.MockRolloutHandler{}, &config, reSyncPeriod, loader.GetFakeClientLoader()) + if err != nil { + t.Fail() + } + rolloutController.Cache.UpdateRolloutToClusterCache(identity, &testRollout1) + + rolloutDependentController, err := admiral.NewRolloutsController(make(chan struct{}), &test.MockRolloutHandler{}, &config, reSyncPeriod, loader.GetFakeClientLoader()) + if err != nil { + t.Fail() + } + + serviceController, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, reSyncPeriod, loader.GetFakeClientLoader()) + if err != nil { + t.Fatalf("%v", err) + } + + serviceDependentController, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, reSyncPeriod, loader.GetFakeClientLoader()) + if err != nil { + t.Fatalf("%v", err) + } + + virtualServiceController, err := istio.NewVirtualServiceController(make(chan struct{}), &test.MockVirtualServiceHandler{}, &config, reSyncPeriod, loader.GetFakeClientLoader()) + if err != nil { + t.Fatalf("%v", err) + } + + globalTrafficPolicyController, err := admiral.NewGlobalTrafficController(make(chan struct{}), &test.MockGlobalTrafficHandler{}, &config, reSyncPeriod, loader.GetFakeClientLoader()) + if err != nil { + t.Fatalf("%v", err) + } + + serviceController.Cache.Put(serviceForDeployment) + serviceController.Cache.Put(serviceForRollout) + serviceController.Cache.Put(serviceForIngress) + + rc := &RemoteController{ + ClusterID: clusterID, + DeploymentController: deploymentController, + RolloutController: rolloutController, + ServiceController: serviceController, + VirtualServiceController: virtualServiceController, + NodeController: &admiral.NodeController{ + Locality: &admiral.Locality{ + Region: "us-west-2", + }, + }, + ServiceEntryController: &istio.ServiceEntryController{ + IstioClient: fakeIstioClient, + Cache: istio.NewServiceEntryCache(), + }, + DestinationRuleController: &istio.DestinationRuleController{ + IstioClient: fakeIstioClient, + Cache: istio.NewDestinationRuleCache(), + }, + GlobalTraffic: globalTrafficPolicyController, + } + + dependentRc := &RemoteController{ + ClusterID: clusterDependentID, + DeploymentController: deploymentDependentController, + RolloutController: rolloutDependentController, + ServiceController: serviceDependentController, + VirtualServiceController: virtualServiceController, + NodeController: &admiral.NodeController{ + Locality: &admiral.Locality{ + Region: "us-west-2", + }, + }, + ServiceEntryController: &istio.ServiceEntryController{ + IstioClient: fakeIstioClient, + Cache: istio.NewServiceEntryCache(), + }, + DestinationRuleController: &istio.DestinationRuleController{ + IstioClient: fakeIstioClient, + Cache: istio.NewDestinationRuleCache(), + }, + GlobalTraffic: globalTrafficPolicyController, + } + + remoteRegistryWithSyncError.PutRemoteController(clusterID, rc) + 
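
The test wiring that follows substitutes testify mocks for ConfigSyncer so each scenario can force UpdateEnvironmentConfigByCluster to succeed or fail. A minimal, standalone sketch of the On/Return pattern used below; `fakeSyncer` and its two-argument signature are hypothetical simplifications (the generated mock lives in admiral/pkg/registry/mocks):

```go
package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// fakeSyncer embeds mock.Mock so expectations can be programmed with On/Return.
type fakeSyncer struct{ mock.Mock }

func (f *fakeSyncer) UpdateEnvironmentConfigByCluster(cluster, env string) error {
	args := f.Called(cluster, env) // records the call and looks up the programmed return values
	return args.Error(0)
}

func main() {
	f := &fakeSyncer{}
	// Match any arguments and replay a canned error, as the failing test case does.
	f.On("UpdateEnvironmentConfigByCluster", mock.Anything, mock.Anything).
		Return(fmt.Errorf("failed to update registry config"))
	err := f.UpdateEnvironmentConfigByCluster("test-dev-k8s", "test")
	fmt.Println(err) // failed to update registry config
}
```

The canned return is replayed whenever an expectation matches, which is how the first test case below receives its "failed to update registry config" error without touching a real registry.
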
remoteRegistryWithSyncError.PutRemoteController(clusterDependentID, dependentRc) + remoteRegistryWithSyncError.StartTime = time.Now() + remoteRegistryWithSyncError.AdmiralCache.ServiceEntryAddressStore = serviceEntryAddressStore + mockConfigSyncerWithErr := &registryMocks.MockConfigSyncer{} + mockConfigSyncerWithErr.On("UpdateEnvironmentConfigByCluster", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(fmt.Errorf("failed to update registry config")) + remoteRegistryWithSyncError.ConfigSyncer = mockConfigSyncerWithErr + configWriterMock := &ConfigWriterMock{} + remoteRegistryWithSyncError.ConfigWriter = configWriterMock + remoteRegistryWithSyncError.AdmiralCache.IdentityClusterCache.Put("identity", clusterID, clusterID) + + remoteRegistry.PutRemoteController(clusterID, rc) + remoteRegistry.PutRemoteController(clusterDependentID, dependentRc) + remoteRegistry.StartTime = time.Now() + remoteRegistry.AdmiralCache.ServiceEntryAddressStore = serviceEntryAddressStore + mockConfigSyncer := &registryMocks.MockConfigSyncer{} + mockConfigSyncer.On("UpdateEnvironmentConfigByCluster", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything). + Return(nil) + remoteRegistry.ConfigSyncer = mockConfigSyncer + remoteRegistry.ConfigWriter = configWriterMock + remoteRegistry.AdmiralCache.IdentityClusterCache.Put("identity", clusterID, clusterID) + + remoteRegistryWithoutMockedSyncer.PutRemoteController(clusterID, rc) + remoteRegistryWithoutMockedSyncer.PutRemoteController(clusterDependentID, dependentRc) + remoteRegistryWithoutMockedSyncer.StartTime = time.Now() + remoteRegistryWithoutMockedSyncer.AdmiralCache.ServiceEntryAddressStore = serviceEntryAddressStore + remoteRegistryWithoutMockedSyncer.ConfigSyncer = registry.NewConfigSync() + remoteRegistryWithoutMockedSyncer.ConfigWriter = configWriterMock + remoteRegistryWithoutMockedSyncer.AdmiralCache.IdentityClusterCache.Put("identity", clusterID, clusterID) + + common.ResetSync() + common.InitializeConfig(common.AdmiralParams{ + KubeconfigPath: "testdata/fake.config", + LabelSet: &common.LabelSet{ + GatewayApp: "gatewayapp", + WorkloadIdentityKey: "identity", + PriorityKey: "priority", + EnvKey: "env", + AdmiralCRDIdentityLabel: "identity", + }, + EnableSAN: true, + SANPrefix: "prefix", + HostnameSuffix: "mesh", + SyncNamespace: "ns", + CacheReconcileDuration: 0, + ClusterRegistriesNamespace: "default", + DependenciesNamespace: "default", + WorkloadSidecarName: "default", + Profile: common.AdmiralProfileDefault, + DependentClusterWorkerConcurrency: 1, + AdmiralStateSyncerMode: true, + }) + passAssertFunc := func() error { return nil } + + testCases := []struct { + name string + assetIdentity string + remoteRegistry *RemoteRegistry + configSyncer *registryMocks.MockConfigSyncer + assertFunc func() error + expectedErr error + }{ + { + name: "Given state syncer mode is enabled, " + + "When state syncer returns an error, " + + "Then, it returns an error, " + + "And, it doesn't call ConfigWriter", + assetIdentity: "identity", + configSyncer: mockConfigSyncerWithErr, + remoteRegistry: remoteRegistryWithSyncError, + assertFunc: passAssertFunc, + expectedErr: fmt.Errorf("failed to update registry config"), + }, + { + name: "Given state syncer mode is enabled, " + + "When ConfigSyncer doesn't return any errors, " + + "Then, it should not return any error, " + + "And, ConfigSyncer should be called, " + + "And, it doesn't call ConfigWriter", + assetIdentity: "identity", + remoteRegistry: remoteRegistry, +
configSyncer: mockConfigSyncer, + assertFunc: passAssertFunc, + expectedErr: nil, + }, + { + name: "Given state syncer mode is enabled, " + + "Then, it should create the expected config", + assetIdentity: "identity", + remoteRegistry: remoteRegistryWithoutMockedSyncer, + assertFunc: func() error { + wantFile, err := os.ReadFile("testdata/expectedIdentityIdentityConfiguration.json") + if err != nil { + return err + } + gotFile, err := os.ReadFile("testdata/identityIdentityConfiguration.json") + if err != nil { + return err + } + if bytes.Equal(wantFile, gotFile) { + return nil + } + return fmt.Errorf("expected configuration not found") + }, + expectedErr: nil, + }, + } + + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + ctx := context.Background() + ctx = context.WithValue(ctx, "clusterName", clusterID) + ctx = context.WithValue(ctx, "eventResourceType", common.Deployment) + + _, err = modifyServiceEntryForNewServiceOrPod( + ctx, + admiral.Add, + env, + c.assetIdentity, + c.remoteRegistry, + ) + assert.Equal(t, err, c.expectedErr) + assert.Equal(t, configWriterMock.AssertNotCalled(t, "AddServiceEntriesWithDrToAllCluster", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ), true) + if c.configSyncer != nil { + assert.Equal(t, c.configSyncer.AssertCalled(t, "UpdateEnvironmentConfigByCluster", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ), true) + } + assert.Equal(t, c.assertFunc(), nil) + }) + } +} diff --git a/admiral/pkg/clusters/shard_handler.go b/admiral/pkg/clusters/shard_handler.go index 262e7022..7df447c6 100644 --- a/admiral/pkg/clusters/shard_handler.go +++ b/admiral/pkg/clusters/shard_handler.go @@ -3,11 +3,12 @@ package clusters import ( "context" "fmt" - admiralapiv1 "github.com/istio-ecosystem/admiral-api/pkg/apis/admiral/v1" - "github.com/istio-ecosystem/admiral/admiral/pkg/registry" "strings" "sync" + admiralapiv1 "github.com/istio-ecosystem/admiral-api/pkg/apis/admiral/v1" + "github.com/istio-ecosystem/admiral/admiral/pkg/registry" + log "github.com/sirupsen/logrus" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -56,31 +57,33 @@ func HandleEventForShard(ctx context.Context, event admiral.EventType, obj *admi ctxLogger := common.GetCtxLogger(ctx, obj.Name, "") tmpShard := obj.DeepCopy() ctxLogger.Infof(common.CtxLogFormat, "HandleEventForShard", obj.Name, "", "", "") - var consumerwg, resultswg sync.WaitGroup + var consumerWG, producerWG, resultsWG sync.WaitGroup configWriterData := make(chan *ConfigWriterData, 1000) configWriterDataResults := make(chan *ConfigWriterData, 1000) for i := 0; i < 5; i++ { - consumerwg.Add(1) - go ConsumeIdentityConfigs(ctxLogger, ctx, configWriterData, configWriterDataResults, remoteRegistry, &consumerwg) + consumerWG.Add(1) + go ConsumeIdentityConfigs(ctxLogger, ctx, configWriterData, configWriterDataResults, remoteRegistry, &consumerWG) } // Get all ICs from shard and put into channel - go ProduceIdentityConfigsFromShard(ctxLogger, *obj, configWriterData, remoteRegistry) + producerWG.Add(1) + go ProduceIdentityConfigsFromShard(ctxLogger, *obj, configWriterData, remoteRegistry, &producerWG) // Start processing results - resultswg.Add(1) - go UpdateShard(ctxLogger, configWriterDataResults, &resultswg, tmpShard) + resultsWG.Add(1) + go UpdateShard(ctxLogger, configWriterDataResults, &resultsWG, tmpShard) // wait for all consumers to 
finish - consumerwg.Wait() + producerWG.Wait() + consumerWG.Wait() // all consumers done, no more values sent to results close(configWriterDataResults) // wait for all results to be processed - resultswg.Wait() + resultsWG.Wait() //TODO: Need to write the new tmpShard with all the results to the cluster + return error for the item to be requeued return nil } // ProduceIdentityConfigsFromShard creates a registry client and uses it to get the identity configs // of the assets on the shard, and puts those into configWriterData which go into the job channel -func ProduceIdentityConfigsFromShard(ctxLogger *log.Entry, shard admiralapiv1.Shard, configWriterData chan<- *ConfigWriterData, rr *RemoteRegistry) { +func ProduceIdentityConfigsFromShard(ctxLogger *log.Entry, shard admiralapiv1.Shard, configWriterData chan<- *ConfigWriterData, rr *RemoteRegistry, producerWG *sync.WaitGroup) { for _, clusterShard := range shard.Spec.Clusters { for _, identityItem := range clusterShard.Identities { identityConfig, err := rr.RegistryClient.GetIdentityConfigByIdentityName(identityItem.Name, ctxLogger) @@ -101,6 +104,7 @@ func ProduceIdentityConfigsFromShard(ctxLogger *log.Entry, shard admiralapiv1.Sh } } } + producerWG.Done() close(configWriterData) } diff --git a/admiral/pkg/clusters/shard_handler_test.go b/admiral/pkg/clusters/shard_handler_test.go index db0159aa..ff0debd8 100644 --- a/admiral/pkg/clusters/shard_handler_test.go +++ b/admiral/pkg/clusters/shard_handler_test.go @@ -4,6 +4,9 @@ import ( "context" "encoding/json" "fmt" + "sync" + "testing" + admiralapiv1 "github.com/istio-ecosystem/admiral-api/pkg/apis/admiral/v1" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio" @@ -11,8 +14,6 @@ import ( istioNetworkingV1Alpha3 "istio.io/api/networking/v1alpha3" istiofake "istio.io/client-go/pkg/clientset/versioned/fake" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sync" - "testing" ) var shardTestSingleton sync.Once @@ -69,16 +70,17 @@ func createMockShard(shardName string, clusterName string, identityName string, return &shard } -func jsonPrint(v any) { +func jsonPrint(v any) string { s, _ := json.MarshalIndent(v, "", "\t") - fmt.Println(string(s)) + return fmt.Sprintln(string(s)) } func TestShardHandler_Added(t *testing.T) { + t.SkipNow() admiralParams := setupForShardTests() rr, _ := InitAdmiralOperator(context.Background(), admiralParams) rc1 := &RemoteController{ - ClusterID: "cg-tax-ppd-usw2-k8s", + ClusterID: "cluster1", ServiceEntryController: &istio.ServiceEntryController{ IstioClient: istiofake.NewSimpleClientset(), Cache: istio.NewServiceEntryCache(), @@ -91,29 +93,29 @@ func TestShardHandler_Added(t *testing.T) { Cache: istio.NewServiceEntryCache(), }, } - rr.PutRemoteController("cg-tax-ppd-usw2-k8s", rc1) + rr.PutRemoteController("cluster1", rc1) rr.PutRemoteController("multi-long-1026-usw2-k8s", rc2) - sampleShard1 := createMockShard("shard-sample", "cg-tax-ppd-usw2-k8s", "sample", "e2e") + sampleShard1 := createMockShard("shard-sample", "cluster1", "sample", "e2e") sampleShard2 := createMockShard("blackhole-shard", "multi-long-1026-usw2-k8s", "intuit.services.gateway.ppdmeshtestblackhole", "multi-long-1026-usw2-k8s") shardHandler := &ShardHandler{ RemoteRegistry: rr, } se1 := &istioNetworkingV1Alpha3.ServiceEntry{ - Hosts: []string{"e2e.intuit.ctg.taxprep.partnerdatatotax.mesh"}, + Hosts: []string{"e2e.sample.mesh"}, Ports: []*istioNetworkingV1Alpha3.ServicePort{{Number: 80, Protocol: "http", Name: "http"}}, -
Location: 1, - Resolution: 2, - Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{{Address: "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-e2e.svc.cluster.local.", Ports: map[string]uint32{"http": 8090}, Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}, Locality: "us-west-2"}}, - ExportTo: []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "ctg-taxprep-partnerdatatotax-usw2-prf", "ctg-taxprep-partnerdatatotax-usw2-qal", common.NamespaceIstioSystem}, - SubjectAltNames: []string{"spiffe://pre-prod.api.intuit.com/Intuit.ctg.taxprep.partnerdatatotax"}, + Location: istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL, + Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS, + Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{{Address: "app-1-spk-root-service.ns-1-usw2-e2e.svc.cluster.local.", Ports: map[string]uint32{"http": 8090}, Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}, Locality: "us-west-2"}}, + ExportTo: []string{common.NamespaceIstioSystem, "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}, + SubjectAltNames: []string{"spiffe://pre-prod.api.intuit.com/sample"}, } se2 := &istioNetworkingV1Alpha3.ServiceEntry{ Hosts: []string{"multi-long-1026-use2-k8s.intuit.services.gateway.ppdmeshtestblackhole.mesh"}, Ports: []*istioNetworkingV1Alpha3.ServicePort{{Number: 80, Protocol: "http", Name: "http"}}, - Location: 1, - Resolution: 2, + Location: istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL, + Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS, Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{ - {Address: "internal-ff96ae9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-east-2.elb.amazonaws.com.", Ports: map[string]uint32{"http": 15443}, Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "deployment"}, Locality: "us-east-2"}, + {Address: "abc-elb.us-east-2.elb.amazonaws.com.", Ports: map[string]uint32{"http": 15443}, Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "deployment"}, Locality: "us-east-2"}, }, ExportTo: []string{common.NamespaceIstioSystem, "services-inboundd268-usw2-dev"}, SubjectAltNames: []string{"spiffe://pre-prod.api.intuit.com/intuit.services.gateway.ppdmeshtestblackhole"}, @@ -131,7 +133,7 @@ func TestShardHandler_Added(t *testing.T) { "Then an SE with local endpoint and istio-system in exportTo should be built", rc: rc1, shard: sampleShard1, - expectedSEName: "e2e.intuit.ctg.taxprep.partnerdatatotax.mesh-se", + expectedSEName: "e2e.sample.mesh-se", expectedSE: se1, }, { @@ -155,10 +157,10 @@ func TestShardHandler_Added(t *testing.T) { if seErr != nil { t.Errorf("failed to get SE with err %v", seErr) } - if !compareServiceEntries(&actualSE.Spec, tt.expectedSE) { - jsonPrint(actualSE.Spec) - jsonPrint(tt.expectedSE) - t.Errorf("expected se did not match actual se") + if actualSE == nil { + t.Errorf("expected se to not be nil") + } else if !compareServiceEntries(&actualSE.Spec, tt.expectedSE) { + t.Errorf("got=%v, want=%v", jsonPrint(actualSE.Spec), jsonPrint(tt.expectedSE)) } }) } diff --git a/admiral/pkg/clusters/testdata/expectedIdentityIdentityConfiguration.json b/admiral/pkg/clusters/testdata/expectedIdentityIdentityConfiguration.json new file mode 100644 index 00000000..ef742cf7 --- /dev/null +++ b/admiral/pkg/clusters/testdata/expectedIdentityIdentityConfiguration.json @@ -0,0 +1,65 @@ +{ + "identityName": "identity", + "clusters": { + "test-dev-k8s": { + "name": "test-dev-k8s", + "locality": "us-west-2", + "ingressEndpoint": 
"east.aws.lb", + "ingressPort": "15443", + "ingressPortName": "http", + "environment": { + "test": { + "name": "test", + "namespace": "foobar-ns", + "services": null, + "serviceName": "", + "type": "rollout", + "selectors": null, + "ports": [ + { + "number": 80, + "protocol": "http", + "name": "http" + } + ], + "trafficPolicy": { + "globaltrafficpolicy": { + "metadata": { + "creationTimestamp": null + }, + "spec": {}, + "status": { + "clustersSynced": 0, + "state": "" + } + }, + "outlierdetection": { + "metadata": { + "creationTimestamp": null + }, + "spec": {}, + "status": { + "clustersSynced": 0, + "state": "" + } + }, + "clientconnectionconfig": { + "metadata": { + "creationTimestamp": null + }, + "spec": { + "connectionPool": {}, + "tunnel": {} + }, + "status": { + "state": "" + } + } + }, + "event": "" + } + } + } + }, + "clientAssets": null +} \ No newline at end of file diff --git a/admiral/pkg/clusters/testdata/identityIdentityConfiguration.json b/admiral/pkg/clusters/testdata/identityIdentityConfiguration.json new file mode 100644 index 00000000..ef742cf7 --- /dev/null +++ b/admiral/pkg/clusters/testdata/identityIdentityConfiguration.json @@ -0,0 +1,65 @@ +{ + "identityName": "identity", + "clusters": { + "test-dev-k8s": { + "name": "test-dev-k8s", + "locality": "us-west-2", + "ingressEndpoint": "east.aws.lb", + "ingressPort": "15443", + "ingressPortName": "http", + "environment": { + "test": { + "name": "test", + "namespace": "foobar-ns", + "services": null, + "serviceName": "", + "type": "rollout", + "selectors": null, + "ports": [ + { + "number": 80, + "protocol": "http", + "name": "http" + } + ], + "trafficPolicy": { + "globaltrafficpolicy": { + "metadata": { + "creationTimestamp": null + }, + "spec": {}, + "status": { + "clustersSynced": 0, + "state": "" + } + }, + "outlierdetection": { + "metadata": { + "creationTimestamp": null + }, + "spec": {}, + "status": { + "clustersSynced": 0, + "state": "" + } + }, + "clientconnectionconfig": { + "metadata": { + "creationTimestamp": null + }, + "spec": { + "connectionPool": {}, + "tunnel": {} + }, + "status": { + "state": "" + } + } + }, + "event": "" + } + } + } + }, + "clientAssets": null +} \ No newline at end of file diff --git a/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json b/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json index 936becb7..e2afa609 100644 --- a/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json +++ b/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json @@ -1,16 +1,16 @@ { "identityName": "intuit.services.gateway.ppdmeshtestblackhole", - "clusters": [ - { + "clusters": { + "multi-long-1026-usw2-k8s": { "_comment-1": "THIS SECTION CONTAINS CLUSTER LEVEL DETAILS, WHICH ARE THE SAME FOR THE ASSET IN A GIVEN CLUSTER", "name": "multi-long-1026-usw2-k8s", "locality": "us-west-2", - "ingressEndpoint": "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + "ingressEndpoint": "abc-elb.us-west-2.elb.amazonaws.com.", "ingressPort": "15443", "ingressPortName": "http", "_comment-2": "THIS SECTION CONTAINS ENVIRONMENT LEVEL DETAILS, FOR THE ASSET IN A GIVEN CLUSTER", - "environment": [ - { + "environment": { + "multi-long-1026-usw2-k8s": { "name": "multi-long-1026-usw2-k8s", "namespace": "services-blackholed268-usw2-dev", "serviceName": "blackhole-gw", @@ -80,18 +80,18 @@ } } } - ] + } }, - { + "multi-long-1026-use2-k8s": { "_comment-1": "THIS SECTION CONTAINS CLUSTER LEVEL DETAILS, WHICH 
ARE THE SAME FOR THE ASSET IN A GIVEN CLUSTER", "name": "multi-long-1026-use2-k8s", "locality": "us-east-2", - "ingressEndpoint": "internal-ff96ae9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-east-2.elb.amazonaws.com.", + "ingressEndpoint": "abc-elb.us-east-2.elb.amazonaws.com.", "ingressPort": "15443", "ingressPortName": "http", "_comment-2": "THIS SECTION CONTAINS ENVIRONMENT LEVEL DETAILS, FOR THE ASSET IN A GIVEN CLUSTER", - "environment": [ - { + "environment": { + "multi-long-1026-use2-k8s": { "name": "multi-long-1026-use2-k8s", "namespace": "services-blackholesh45-use2-dev", "serviceName": "blackhole-gw", @@ -161,12 +161,10 @@ } } } - ] + } } - ], - "clientAssets": [ - { - "name": "intuit.services.gateway.ppdmeshtestinbounds" - } - ] + }, + "clientAssets": { + "intuit.services.gateway.ppdmeshtestinbounds": "intuit.services.gateway.ppdmeshtestinbounds" + } } \ No newline at end of file diff --git a/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json b/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json index 89cff6ab..9730fb40 100644 --- a/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json +++ b/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json @@ -1,16 +1,16 @@ { "identityName": "intuit.services.gateway.ppdmeshtestinbounds", - "clusters": [ - { + "clusters": { + "multi-long-1026-usw2-k8s": { "_comment-1": "THIS SECTION CONTAINS CLUSTER LEVEL DETAILS, WHICH ARE THE SAME FOR THE ASSET IN A GIVEN CLUSTER", "name": "multi-long-1026-usw2-k8s", "locality": "us-west-2", - "ingressEndpoint": "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + "ingressEndpoint": "abc-elb.us-west-2.elb.amazonaws.com.", "ingressPort": "15443", "ingressPortName": "http", "_comment-2": "THIS SECTION CONTAINS ENVIRONMENT LEVEL DETAILS, FOR THE ASSET IN A GIVEN CLUSTER", - "environment": [ - { + "environment": { + "multi-long-1026-usw2-k8s": { "name": "multi-long-1026-usw2-k8s", "namespace": "services-inboundd268-usw2-dev", "serviceName": "inbound-gw", @@ -80,12 +80,10 @@ } } } - ] + } } - ], - "clientAssets": [ - { - "name": "intuit.services.gateway.ppdmeshtestinbounds" - } - ] + }, + "clientAssets": { + "intuit.services.gateway.ppdmeshtestinbounds": "intuit.services.gateway.ppdmeshtestinbounds" + } } \ No newline at end of file diff --git a/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json b/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json index 4f39c0f6..7a2c701d 100644 --- a/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json +++ b/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json @@ -1,29 +1,37 @@ { - "identityName": "Intuit.ctg.taxprep.partnerdatatotax", - "clusters": [ - { + "identityName": "sample", + "clusters": { + "cluster1": { "_comment-1": "THIS SECTION CONTAINS CLUSTER LEVEL DETAILS, WHICH ARE THE SAME FOR THE ASSET IN A GIVEN CLUSTER", - "name": "cg-tax-ppd-usw2-k8s", + "name": "cluster1", "locality": "us-west-2", - "ingressEndpoint": "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + "ingressEndpoint": "abc-elb.us-west-2.elb.amazonaws.com.", "ingressPort": "15443", "ingressPortName": "http", "_comment-2": "THIS SECTION CONTAINS ENVIRONMENT LEVEL DETAILS, FOR THE ASSET IN A GIVEN CLUSTER", - "environment": [ - { + "environment": { + "prf": { "name": "prf", - "namespace": "ctg-taxprep-partnerdatatotax-usw2-prf", - "serviceName": "partner-data-to-tax-spk-root-service", + "namespace": 
"ns-1-usw2-prf", + "serviceName": "app-1-spk-root-service", + "services": { + "app-1-spk-root-service": { + "name": "app-1-spk-root-service", + "weight": -1, + "ports": { + "http": 8090 + } + } + }, "type": "rollout", "selectors": { - "app": "partner-data-to-tax" + "app": "app-1" }, "ports": [ { - "name": "http-service-mesh", + "name": "http", "port": 8090, - "protocol": "TCP", - "targetPort": 8090 + "protocol": "http" } ], "trafficPolicy": { @@ -80,20 +88,28 @@ } } }, - { + "e2e": { "name": "e2e", - "namespace": "ctg-taxprep-partnerdatatotax-usw2-e2e", - "serviceName": "partner-data-to-tax-spk-root-service", + "namespace": "ns-1-usw2-e2e", + "serviceName": "app-1-spk-root-service", + "services": { + "app-1-spk-root-service": { + "name": "app-1-spk-root-service", + "weight": -1, + "ports": { + "http": 8090 + } + } + }, "type": "rollout", "selectors": { - "app": "partner-data-to-tax" + "app": "app-1" }, "ports": [ { - "name": "http-service-mesh", - "port": 8090, - "protocol": "TCP", - "targetPort": 8090 + "name": "http", + "port": 80, + "protocol": "http" } ], "trafficPolicy": { @@ -150,20 +166,28 @@ } } }, - { + "qal": { "name": "qal", - "namespace": "ctg-taxprep-partnerdatatotax-usw2-qal", - "serviceName": "partner-data-to-tax-spk-root-service", + "namespace": "ns-1-usw2-qal", + "serviceName": "app-1-spk-root-service", + "services": { + "app-1-spk-root-service": { + "name": "app-1-spk-root-service", + "weight": -1, + "ports": { + "http": 8090 + } + } + }, "type": "rollout", "selectors": { - "app": "partner-data-to-tax" + "app": "app-1" }, "ports": [ { - "name": "http-service-mesh", - "port": 8090, - "protocol": "TCP", - "targetPort": 8090 + "name": "http", + "port": 80, + "protocol": "http" } ], "trafficPolicy": { @@ -220,21 +244,13 @@ } } } - ] - } - ], - "clientAssets": [ - { - "name": "intuit.services.gateway.ppdmeshtestinbounds" - }, - { - "name": "intuit.platform.servicesgateway.servicesgateway" - }, - { - "name": "intuit.ctg.taxprep.partnerdatatotax" - }, - { - "name": "sample" + } } - ] + }, + "clientAssets": { + "asset1": "asset1", + "asset2": "asset2", + "asset3": "asset3", + "sample": "sample" + } } \ No newline at end of file diff --git a/admiral/pkg/clusters/types.go b/admiral/pkg/clusters/types.go index 75a81ead..99c61500 100644 --- a/admiral/pkg/clusters/types.go +++ b/admiral/pkg/clusters/types.go @@ -94,7 +94,9 @@ type RemoteRegistry struct { ClientLoader loader.ClientLoader ClusterShardHandler registry.ClusterShardStore ClusterIdentityStoreHandler registry.ClusterIdentityStore + ConfigSyncer registry.ConfigSyncer RegistryClient registry.IdentityConfiguration + ConfigWriter ConfigWriter } // ModifySEFunc is a function that follows the dependency injection pattern which is used by HandleEventForGlobalTrafficPolicy @@ -172,13 +174,16 @@ func NewRemoteRegistry(ctx context.Context, params common.AdmiralParams) *Remote } rr := &RemoteRegistry{ - ctx: ctx, - StartTime: time.Now(), - remoteControllers: make(map[string]*RemoteController), - AdmiralCache: admiralCache, - ServiceEntrySuspender: serviceEntrySuspender, - AdmiralDatabaseClient: admiralDatabaseClient, - ClientLoader: clientLoader, + ctx: ctx, + StartTime: time.Now(), + remoteControllers: make(map[string]*RemoteController), + AdmiralCache: admiralCache, + ServiceEntrySuspender: serviceEntrySuspender, + AdmiralDatabaseClient: admiralDatabaseClient, + ClientLoader: clientLoader, + ClusterIdentityStoreHandler: registry.NewClusterIdentityStoreHandler(), + ConfigSyncer: registry.NewConfigSync(), + ConfigWriter: 
NewConfigWriter(), } if common.IsAdmiralOperatorMode() { diff --git a/admiral/pkg/clusters/util.go b/admiral/pkg/clusters/util.go index d16f1026..5bcbd5a0 100644 --- a/admiral/pkg/clusters/util.go +++ b/admiral/pkg/clusters/util.go @@ -4,42 +4,23 @@ import ( "context" "errors" "sort" - "strconv" "strings" "time" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" - "github.com/istio-ecosystem/admiral/admiral/pkg/util" + "github.com/istio-ecosystem/admiral/admiral/pkg/registry" + commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util" argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" networking "istio.io/api/networking/v1alpha3" - k8sAppsV1 "k8s.io/api/apps/v1" k8sV1 "k8s.io/api/core/v1" ) type WorkloadEntrySorted []*networking.WorkloadEntry -func GetMeshPortsForDeployments(clusterName string, destService *k8sV1.Service, - destDeployment *k8sAppsV1.Deployment) map[string]uint32 { - - if destService == nil || destDeployment == nil { - logrus.Warnf("Deployment or Service is nil cluster=%s", clusterName) - return nil - } - - var meshPorts string - if destDeployment.Spec.Template.Annotations == nil { - meshPorts = "" - } else { - meshPorts = destDeployment.Spec.Template.Annotations[common.SidecarEnabledPorts] - } - ports := getMeshPortsHelper(meshPorts, destService, clusterName) - return ports -} - func GetMeshPortsForRollout(clusterName string, destService *k8sV1.Service, destRollout *argo.Rollout) map[string]uint32 { if destService == nil || destRollout == nil { @@ -53,7 +34,7 @@ func GetMeshPortsForRollout(clusterName string, destService *k8sV1.Service, } else { meshPorts = destRollout.Spec.Template.Annotations[common.SidecarEnabledPorts] } - ports := getMeshPortsHelper(meshPorts, destService, clusterName) + ports := common.GetMeshPortsHelper(meshPorts, destService, clusterName) return ports } @@ -72,67 +53,6 @@ func GetServiceSelector(clusterName string, destService *k8sV1.Service) *common. 
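Exposing `ConfigSyncer` and `ConfigWriter` as fields on `RemoteRegistry` makes both injectable: production code gets the defaults wired in `NewRemoteRegistry`, while tests can substitute the mockery-generated mock added under `admiral/pkg/mocks` in this patch. An illustrative test sketch, assuming the generated mock satisfies the `clusters.ConfigWriter` interface it was produced from:

```go
package clusters_test // hypothetical; kept outside package clusters since pkg/mocks imports it

import (
	"testing"

	"github.com/istio-ecosystem/admiral/admiral/pkg/clusters"
	"github.com/istio-ecosystem/admiral/admiral/pkg/mocks"
	"github.com/stretchr/testify/mock"
)

func TestRemoteRegistryWithMockConfigWriter(t *testing.T) {
	writer := mocks.NewConfigWriter(t)
	// Accept any arguments; Maybe() keeps the cleanup-time
	// AssertExpectations happy if the code under test never writes.
	writer.On("AddServiceEntriesWithDrToAllCluster",
		mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything,
		mock.Anything, mock.Anything, mock.Anything, mock.Anything).
		Return(nil).Maybe()

	// Inject the mock where NewRemoteRegistry would wire NewConfigWriter().
	rr := &clusters.RemoteRegistry{ConfigWriter: writer}
	_ = rr
}
```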
return tempMap } -func getMeshPortsHelper(meshPorts string, destService *k8sV1.Service, clusterName string) map[string]uint32 { - var ports = make(map[string]uint32) - - if destService == nil { - return ports - } - if len(meshPorts) == 0 { - logrus.Infof(LogFormatAdv, "GetMeshPorts", "service", destService.Name, destService.Namespace, - clusterName, "No mesh ports present, defaulting to first port") - if destService.Spec.Ports != nil && len(destService.Spec.Ports) > 0 { - var protocol = util.GetPortProtocol(destService.Spec.Ports[0].Name) - ports[protocol] = uint32(destService.Spec.Ports[0].Port) - } - return ports - } - - meshPortsSplit := strings.Split(meshPorts, ",") - - if len(meshPortsSplit) > 1 { - logrus.Warnf(LogErrFormat, "Get", "MeshPorts", "", clusterName, - "Multiple inbound mesh ports detected, admiral generates service entry with first matched port and protocol") - } - - //fetch the first valid port if there is more than one mesh port - var meshPortMap = make(map[uint32]uint32) - for _, meshPort := range meshPortsSplit { - port, err := strconv.ParseUint(meshPort, 10, 32) - if err == nil { - meshPortMap[uint32(port)] = uint32(port) - break - } - } - for _, servicePort := range destService.Spec.Ports { - //handling relevant protocols from here: - // https://istio.io/latest/docs/ops/configuration/traffic-management/protocol-selection/#manual-protocol-selection - //use target port if present to match the annotated mesh port - targetPort := uint32(servicePort.Port) - if servicePort.TargetPort.StrVal != "" { - port, err := strconv.Atoi(servicePort.TargetPort.StrVal) - if err != nil { - logrus.Warnf(LogErrFormat, "GetMeshPorts", "Failed to parse TargetPort", destService.Name, clusterName, err) - } - if port > 0 { - targetPort = uint32(port) - } - - } - if servicePort.TargetPort.IntVal != 0 { - targetPort = uint32(servicePort.TargetPort.IntVal) - } - if _, ok := meshPortMap[targetPort]; ok { - var protocol = util.GetPortProtocol(servicePort.Name) - logrus.Infof(LogFormatAdv, "MeshPort", servicePort.Port, destService.Name, destService.Namespace, - clusterName, "Protocol: "+protocol) - ports[protocol] = uint32(servicePort.Port) - break - } - } - return ports -} - func GetServiceEntryStateFromConfigmap(configmap *k8sV1.ConfigMap) *ServiceEntryAddressStore { bytes := []byte(configmap.Data["serviceEntryAddressStore"]) @@ -376,3 +296,36 @@ func (w WorkloadEntrySorted) Less(i, j int) bool { func (w WorkloadEntrySorted) Swap(i, j int) { w[i], w[j] = w[j], w[i] } + +// TODO: it should return an error when locality is not found +func getLocality(rc *RemoteController) string { + if rc.NodeController.Locality != nil { + return rc.NodeController.Locality.Region + } + return "" +} + +func getIngressEndpointAndPort(rc *RemoteController) (string, int) { + return rc.ServiceController.Cache. 
+ GetLoadBalancer(common.GetAdmiralParams().LabelSet.GatewayApp, common.NamespaceIstioSystem) +} + +func getIngressPort(rc *RemoteController) string { + return "" +} + +func getIngressPortName(meshPorts map[string]uint32) string { + var finalProtocol = commonUtil.Http + for protocol := range meshPorts { + finalProtocol = protocol + } + return finalProtocol +} + +func parseWeightedService(weightedServices map[string]*WeightedService) map[string]*registry.RegistryServiceConfig { + return nil +} + +func parseMigrationService(services []*k8sV1.Service) map[string]*registry.RegistryServiceConfig { + return nil +} diff --git a/admiral/pkg/clusters/util_test.go b/admiral/pkg/clusters/util_test.go index 290727ad..ae22a767 100644 --- a/admiral/pkg/clusters/util_test.go +++ b/admiral/pkg/clusters/util_test.go @@ -20,149 +20,12 @@ import ( argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" "istio.io/client-go/pkg/apis/networking/v1alpha3" - k8sAppsV1 "k8s.io/api/apps/v1" coreV1 "k8s.io/api/core/v1" k8sV1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" ) -func TestGetMeshPorts(t *testing.T) { - var ( - annotatedPort = 8090 - annotatedSecondPort = 8091 - defaultServicePort = uint32(8080) - ports = map[string]uint32{"http": uint32(annotatedPort)} - portsDiffTargetPort = map[string]uint32{"http": uint32(80)} - grpcPorts = map[string]uint32{"grpc": uint32(annotatedPort)} - grpcWebPorts = map[string]uint32{"grpc-web": uint32(annotatedPort)} - http2Ports = map[string]uint32{"http2": uint32(annotatedPort)} - portsFromDefaultSvcPort = map[string]uint32{"http": defaultServicePort} - emptyPorts = map[string]uint32{} - defaultK8sSvcPortNoName = k8sV1.ServicePort{Port: int32(defaultServicePort)} - defaultK8sSvcPort = k8sV1.ServicePort{Name: "default", Port: int32(defaultServicePort)} - meshK8sSvcPort = k8sV1.ServicePort{Name: "mesh", Port: int32(annotatedPort)} - serviceMeshPorts = []k8sV1.ServicePort{defaultK8sSvcPort, meshK8sSvcPort} - serviceMeshPortsOnlyDefault = []k8sV1.ServicePort{defaultK8sSvcPortNoName} - service = k8sV1.Service{ - ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: serviceMeshPorts}, - } - deployment = k8sAppsV1.Deployment{ - Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort)}}, - }}} - deploymentWithMultipleMeshPorts = k8sAppsV1.Deployment{ - Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort) + "," + strconv.Itoa(annotatedSecondPort)}}, - }}} - ) - - testCases := []struct { - name string - clusterName string - service k8sV1.Service - deployment k8sAppsV1.Deployment - expected map[string]uint32 - }{ - { - name: "should return a port based on annotation", - service: service, - deployment: deployment, - expected: ports, - }, - { - name: "should return a http port if no port name is specified", - service: k8sV1.Service{ - ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Port: int32(80), TargetPort: intstr.FromInt(annotatedPort)}}}, - }, - deployment: 
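`getIngressPortName` keeps the last protocol key it sees while ranging over `meshPorts`, falling back to a default when the map is empty. Since Go randomizes map iteration order, the result is only deterministic when the map has at most one entry. A self-contained illustration, assuming `commonUtil.Http` is the literal `"http"`:

```go
package main

import "fmt"

// Local copy of the new helper, with commonUtil.Http inlined as "http".
func getIngressPortName(meshPorts map[string]uint32) string {
	finalProtocol := "http"
	for protocol := range meshPorts {
		finalProtocol = protocol
	}
	return finalProtocol
}

func main() {
	fmt.Println(getIngressPortName(nil))                             // "http" (default)
	fmt.Println(getIngressPortName(map[string]uint32{"grpc": 8090})) // "grpc"
	// With two entries the winner depends on randomized iteration order:
	fmt.Println(getIngressPortName(map[string]uint32{"grpc": 8090, "http": 8080}))
}
```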
deployment, - expected: portsDiffTargetPort, - }, - { - name: "should return a http port if the port name doesn't start with a protocol name", - service: k8sV1.Service{ - ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "hello-grpc", Port: int32(annotatedPort)}}}, - }, - deployment: deployment, - expected: ports, - }, - { - name: "should return a grpc port based on annotation", - service: k8sV1.Service{ - ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "grpc-service", Port: int32(annotatedPort)}}}, - }, - deployment: deployment, - expected: grpcPorts, - }, - { - name: "should return a grpc-web port based on annotation", - service: k8sV1.Service{ - ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "grpc-web", Port: int32(annotatedPort)}}}, - }, - deployment: deployment, - expected: grpcWebPorts, - }, - { - name: "should return a http2 port based on annotation", - service: k8sV1.Service{ - ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http2", Port: int32(annotatedPort)}}}, - }, - deployment: deployment, - expected: http2Ports, - }, - { - name: "should return a default port", - service: k8sV1.Service{ - ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: serviceMeshPortsOnlyDefault}, - }, - deployment: k8sAppsV1.Deployment{ - Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}}, - }}}, - expected: portsFromDefaultSvcPort, - }, - { - name: "should return empty ports", - service: k8sV1.Service{ - ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: nil}, - }, - deployment: k8sAppsV1.Deployment{ - Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}}, - }}}, - expected: emptyPorts, - }, - { - name: "should return a http port if the port name doesn't start with a protocol name", - service: k8sV1.Service{ - ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http", Port: int32(annotatedPort)}, - {Name: "grpc", Port: int32(annotatedSecondPort)}}}, - }, - deployment: deploymentWithMultipleMeshPorts, - expected: ports, - }, - } - - for _, c := range testCases { - t.Run(c.name, func(t *testing.T) { - meshPorts := GetMeshPortsForDeployments(c.clusterName, &c.service, &c.deployment) - if !reflect.DeepEqual(meshPorts, c.expected) { - t.Errorf("Wanted meshPorts: %v, got: %v", c.expected, meshPorts) - } - }) - } -} - func TestValidateConfigmapBeforePutting(t *testing.T) { legalStore := ServiceEntryAddressStore{ diff --git a/admiral/pkg/controller/common/common.go b/admiral/pkg/controller/common/common.go index 9f61474e..6127e7d5 100644 --- a/admiral/pkg/controller/common/common.go +++ b/admiral/pkg/controller/common/common.go @@ -18,6 +18,8 @@ import ( v12 
"k8s.io/apimachinery/pkg/apis/meta/v1" v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" + "github.com/istio-ecosystem/admiral/admiral/pkg/util" + "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" k8sAppsV1 "k8s.io/api/apps/v1" @@ -27,6 +29,8 @@ import ( var ( CtxLogFormat = "task=%v name=%v namespace=%s cluster=%s message=%v" CtxLogFormatWithTime = "task=%v name=%v namespace=%s cluster=%s message=%v txTime=%v" + LogFormatAdv = "op=%v type=%v name=%v namespace=%s cluster=%s message=%v" + LogErrFormat = "op=%v type=%v name=%v cluster=%v error=%v" ConfigWriter = "ConfigWriter" ) @@ -619,3 +623,82 @@ func IsPresent(s []string, e string) bool { func IsAirEnv(originalEnvLabel string) bool { return strings.HasSuffix(originalEnvLabel, AIREnvSuffix) } + +func GetMeshPortsForDeployments(clusterName string, destService *k8sV1.Service, + destDeployment *k8sAppsV1.Deployment) map[string]uint32 { + + if destService == nil || destDeployment == nil { + logrus.Warnf("Deployment or Service is nil cluster=%s", clusterName) + return nil + } + + var meshPorts string + if destDeployment.Spec.Template.Annotations == nil { + meshPorts = "" + } else { + meshPorts = destDeployment.Spec.Template.Annotations[SidecarEnabledPorts] + } + ports := GetMeshPortsHelper(meshPorts, destService, clusterName) + return ports +} + +func GetMeshPortsHelper(meshPorts string, destService *k8sV1.Service, clusterName string) map[string]uint32 { + var ports = make(map[string]uint32) + + if destService == nil { + return ports + } + if len(meshPorts) == 0 { + logrus.Infof(LogFormatAdv, "GetMeshPorts", "service", destService.Name, destService.Namespace, + clusterName, "No mesh ports present, defaulting to first port") + if destService.Spec.Ports != nil && len(destService.Spec.Ports) > 0 { + var protocol = util.GetPortProtocol(destService.Spec.Ports[0].Name) + ports[protocol] = uint32(destService.Spec.Ports[0].Port) + } + return ports + } + + meshPortsSplit := strings.Split(meshPorts, ",") + + if len(meshPortsSplit) > 1 { + logrus.Warnf(LogErrFormat, "Get", "MeshPorts", "", clusterName, + "Multiple inbound mesh ports detected, admiral generates service entry with first matched port and protocol") + } + + //fetch the first valid port if there is more than one mesh port + var meshPortMap = make(map[uint32]uint32) + for _, meshPort := range meshPortsSplit { + port, err := strconv.ParseUint(meshPort, 10, 32) + if err == nil { + meshPortMap[uint32(port)] = uint32(port) + break + } + } + for _, servicePort := range destService.Spec.Ports { + //handling relevant protocols from here: + // https://istio.io/latest/docs/ops/configuration/traffic-management/protocol-selection/#manual-protocol-selection + //use target port if present to match the annotated mesh port + targetPort := uint32(servicePort.Port) + if servicePort.TargetPort.StrVal != "" { + port, err := strconv.Atoi(servicePort.TargetPort.StrVal) + if err != nil { + logrus.Warnf(LogErrFormat, "GetMeshPorts", "Failed to parse TargetPort", destService.Name, clusterName, err) + } + if port > 0 { + targetPort = uint32(port) + } + + } + if servicePort.TargetPort.IntVal != 0 { + targetPort = uint32(servicePort.TargetPort.IntVal) + } + if _, ok := meshPortMap[targetPort]; ok { + var protocol = util.GetPortProtocol(servicePort.Name) + logrus.Infof(LogFormatAdv, "MeshPort", servicePort.Port, destService.Name, destService.Namespace, + clusterName, "Protocol: "+protocol) + ports[protocol] = uint32(servicePort.Port) + break + } + } + return ports +} diff --git 
a/admiral/pkg/controller/common/common_test.go b/admiral/pkg/controller/common/common_test.go index 0a6b2828..2209b0be 100644 --- a/admiral/pkg/controller/common/common_test.go +++ b/admiral/pkg/controller/common/common_test.go @@ -5,6 +5,7 @@ import ( "context" "errors" "reflect" + "strconv" "strings" "testing" "time" @@ -15,16 +16,15 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" - v12 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" + admiralV1Alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" k8sAppsV1 "k8s.io/api/apps/v1" k8sCoreV1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - //v1admiral "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" + "k8s.io/apimachinery/pkg/util/intstr" ) -var ignoreUnexported = cmpopts.IgnoreUnexported(v12.GlobalTrafficPolicy{}.Status) +var ignoreUnexported = cmpopts.IgnoreUnexported(admiralV1Alpha1.GlobalTrafficPolicy{}.Status) func init() { initConfig(false, false) @@ -61,7 +61,7 @@ func initConfig(fqdn bool, fqdnLocal bool) { } func TestGetTrafficConfigTransactionID(t *testing.T) { - tc := admiralv1.TrafficConfig{ + tc := admiralV1Alpha1.TrafficConfig{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"transactionID": ""}, Annotations: map[string]string{ "transactionID": "123456", @@ -72,7 +72,7 @@ func TestGetTrafficConfigTransactionID(t *testing.T) { } func TestGetTrafficConfigRevision(t *testing.T) { - tc := admiralv1.TrafficConfig{ + tc := admiralV1Alpha1.TrafficConfig{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"revisionNumber": ""}, Annotations: map[string]string{ "revisionNumber": "123456", @@ -83,7 +83,7 @@ func TestGetTrafficConfigRevision(t *testing.T) { } func TestGetTrafficConfigIdentity(t *testing.T) { - tc := admiralv1.TrafficConfig{ + tc := admiralV1Alpha1.TrafficConfig{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"asset": ""}, Annotations: map[string]string{ "asset": "123456", @@ -100,8 +100,8 @@ func TestGetSAN(t *testing.T) { identifierVal := "company.platform.server" domain := "preprd" - deployment := k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{identifier: identifierVal}}}}} - deploymentWithAnnotation := k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal}}}}} + deployment := k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{identifier: identifierVal}}}}} + deploymentWithAnnotation := k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal}}}}} deploymentWithNoIdentifier := k8sAppsV1.Deployment{} @@ -161,25 +161,25 @@ func TestGetCname(t *testing.T) { }{ { name: "should return valid cname (from label)", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: 
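The test file also standardizes on one descriptive alias per API group, `admiralV1Alpha1` for the Admiral CRDs and `metav1` for apimachinery, in place of the interchangeable `admiralv1`/`v12` and `v1` aliases it mixed before. Illustrative only:

```go
package common_test // illustrative only

import (
	admiralV1Alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// One alias per API group makes it obvious which package a type comes from.
var gtp = admiralV1Alpha1.GlobalTrafficPolicy{
	ObjectMeta: metav1.ObjectMeta{
		Labels: map[string]string{"identity": "app1", "admiral.io/env": "stage1"},
	},
}
```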
map[string]string{identifier: identifierVal, "env": "stage"}}}}}, expected: strings.ToLower("stage." + identifierVal + ".global"), }, { name: "should return valid cname (from label) uses case sensitive DNS annotation -enabled", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"admiral.io/cname-case-sensitive": "true"}, Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"admiral.io/cname-case-sensitive": "true"}, Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, expected: "stage." + identifierVal + ".global", }, { name: "should return valid cname (from label) uses case sensitive DNS annotation -disabled", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"admiral.io/cname-case-sensitive": "false"}, Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"admiral.io/cname-case-sensitive": "false"}, Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, expected: strings.ToLower("stage." + identifierVal + ".global"), }, { name: "should return valid cname (from annotation)", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal}, Labels: map[string]string{"env": "stage"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal}, Labels: map[string]string{"env": "stage"}}}}}, expected: strings.ToLower("stage." 
+ identifierVal + ".global"), }, { name: "should return empty string", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"env": "stage"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"env": "stage"}}}}}, expected: "", }, } @@ -250,12 +250,12 @@ func TestNodeLocality(t *testing.T) { }{ { name: "should return valid node region", - node: k8sCoreV1.Node{Spec: k8sCoreV1.NodeSpec{}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{NodeRegionLabel: nodeLocalityLabel}}}, + node: k8sCoreV1.Node{Spec: k8sCoreV1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{NodeRegionLabel: nodeLocalityLabel}}}, expected: nodeLocalityLabel, }, { name: "should return empty value when node annotation isn't present", - node: k8sCoreV1.Node{Spec: k8sCoreV1.NodeSpec{}, ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}}}, + node: k8sCoreV1.Node{Spec: k8sCoreV1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}}, expected: "", }, } @@ -283,25 +283,25 @@ func TestGetDeploymentGlobalIdentifier(t *testing.T) { }{ { name: "should return valid identifier from label", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, expected: identifierVal, originalex: identifierVal, }, { name: "should return valid identifier from annotations", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, expected: identifierVal, originalex: identifierVal, }, { name: "should return partitioned identifier", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal, "env": "stage", "admiral.io/identityPartition": "pid"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal, "env": "stage", "admiral.io/identityPartition": "pid"}}}}}, expected: "pid." 
+ identifierVal, originalex: identifierVal, }, { name: "should return empty identifier", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}, Annotations: map[string]string{}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}, Annotations: map[string]string{}}}}}, expected: "", originalex: "", }, @@ -333,17 +333,17 @@ func TestGetDeploymentIdentityPartition(t *testing.T) { }{ { name: "should return valid identifier from label", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}}}, expected: identifierVal, }, { name: "should return valid identifier from annotations", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}}}, expected: identifierVal, }, { name: "should return empty identifier", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}, Annotations: map[string]string{}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}, Annotations: map[string]string{}}}}}, expected: "", }, } @@ -370,17 +370,17 @@ func TestGetPodGlobalIdentifier(t *testing.T) { }{ { name: "should return valid identifier from label", - pod: k8sCoreV1.Pod{Spec: k8sCoreV1.PodSpec{}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}, + pod: k8sCoreV1.Pod{Spec: k8sCoreV1.PodSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}, expected: identifierVal, }, { name: "should return valid identifier from annotation", - pod: k8sCoreV1.Pod{Spec: k8sCoreV1.PodSpec{}, ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal, "env": "stage"}}}, + pod: k8sCoreV1.Pod{Spec: k8sCoreV1.PodSpec{}, ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal, "env": "stage"}}}, expected: identifierVal, }, { name: "should return empty identifier", - pod: k8sCoreV1.Pod{Spec: k8sCoreV1.PodSpec{}, ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}, Labels: map[string]string{}}}, + pod: k8sCoreV1.Pod{Spec: k8sCoreV1.PodSpec{}, ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}, Labels: map[string]string{}}}, expected: "", }, } @@ -404,32 +404,32 @@ func TestGetEnv(t *testing.T) { }{ { name: "should return default env", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}}}}}, + deployment: 
k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}}}}, expected: Default, }, { name: "should return valid env from label", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}, Labels: map[string]string{"env": "stage2"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}, Labels: map[string]string{"env": "stage2"}}}}}, expected: "stage2", }, { name: "should return valid env from new annotation", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"admiral.io/env": "stage1"}, Labels: map[string]string{"env": "stage2"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"admiral.io/env": "stage1"}, Labels: map[string]string{"env": "stage2"}}}}}, expected: "stage1", }, { name: "should return valid env from new label", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}, Labels: map[string]string{"admiral.io/env": "production", "env": "stage2"}}}}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}, Labels: map[string]string{"admiral.io/env": "production", "env": "stage2"}}}}}, expected: "production", }, { name: "should return env from namespace suffix", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}}}}, ObjectMeta: v1.ObjectMeta{Namespace: "uswest2-prd"}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}}}, ObjectMeta: metav1.ObjectMeta{Namespace: "uswest2-prd"}}, expected: "prd", }, { name: "should return default when namespace doesn't have blah..region-env format", - deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}}}}, ObjectMeta: v1.ObjectMeta{Namespace: "sample"}}, + deployment: k8sAppsV1.Deployment{Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}}}, ObjectMeta: metav1.ObjectMeta{Namespace: "sample"}}, expected: Default, }, } @@ -446,34 +446,34 @@ func TestGetEnv(t *testing.T) { func TestGetGtpEnv(t *testing.T) { - envNewAnnotationGtp := v12.GlobalTrafficPolicy{} - envNewAnnotationGtp.CreationTimestamp = v1.Now() + envNewAnnotationGtp := admiralV1Alpha1.GlobalTrafficPolicy{} + envNewAnnotationGtp.CreationTimestamp = metav1.Now() envNewAnnotationGtp.Labels = map[string]string{"identity": "app1", "admiral.io/env": "stage1"} envNewAnnotationGtp.Annotations = map[string]string{"admiral.io/env": "production"} envNewAnnotationGtp.Namespace = "namespace" envNewAnnotationGtp.Name = "myGTP-new-annotation" - envNewLabelGtp := v12.GlobalTrafficPolicy{} - envNewLabelGtp.CreationTimestamp = v1.Now() + envNewLabelGtp := 
admiralV1Alpha1.GlobalTrafficPolicy{} + envNewLabelGtp.CreationTimestamp = metav1.Now() envNewLabelGtp.Labels = map[string]string{"identity": "app1", "admiral.io/env": "stage1", "env": "stage2"} envNewLabelGtp.Namespace = "namespace" envNewLabelGtp.Name = "myGTP-new-label" - envLabelGtp := v12.GlobalTrafficPolicy{} - envLabelGtp.CreationTimestamp = v1.Now() + envLabelGtp := admiralV1Alpha1.GlobalTrafficPolicy{} + envLabelGtp.CreationTimestamp = metav1.Now() envLabelGtp.Labels = map[string]string{"identity": "app1", "env": "stage2"} envLabelGtp.Namespace = "namespace" envLabelGtp.Name = "myGTP-label" - noEnvGtp := v12.GlobalTrafficPolicy{} - noEnvGtp.CreationTimestamp = v1.Now() + noEnvGtp := admiralV1Alpha1.GlobalTrafficPolicy{} + noEnvGtp.CreationTimestamp = metav1.Now() noEnvGtp.Labels = map[string]string{"identity": "app1"} noEnvGtp.Namespace = "namespace" noEnvGtp.Name = "myGTP-no-env" testCases := []struct { name string - gtp *v12.GlobalTrafficPolicy + gtp *admiralV1Alpha1.GlobalTrafficPolicy expectedEnv string }{ { @@ -511,27 +511,27 @@ func TestGetGtpEnv(t *testing.T) { func TestGetRoutingPolicyEnv(t *testing.T) { - envNewAnnotationRP := v12.RoutingPolicy{} - envNewAnnotationRP.CreationTimestamp = v1.Now() + envNewAnnotationRP := admiralV1Alpha1.RoutingPolicy{} + envNewAnnotationRP.CreationTimestamp = metav1.Now() envNewAnnotationRP.Labels = map[string]string{"identity": "app1", "admiral.io/env": "stage1"} envNewAnnotationRP.Annotations = map[string]string{"identity": "app1", "admiral.io/env": "stage1"} envNewAnnotationRP.Namespace = "namespace" envNewAnnotationRP.Name = "myRP-new-annotation" - envLabelRP := v12.RoutingPolicy{} - envLabelRP.CreationTimestamp = v1.Now() + envLabelRP := admiralV1Alpha1.RoutingPolicy{} + envLabelRP.CreationTimestamp = metav1.Now() envLabelRP.Labels = map[string]string{"admiral.io/env": "stage1", "env": "stage2"} envLabelRP.Namespace = "namespace" envLabelRP.Name = "myRP-label" - noEnvRP := v12.RoutingPolicy{} - noEnvRP.CreationTimestamp = v1.Now() + noEnvRP := admiralV1Alpha1.RoutingPolicy{} + noEnvRP.CreationTimestamp = metav1.Now() noEnvRP.Namespace = "namespace" noEnvRP.Name = "myRP-no-env" testCases := []struct { name string - rp *v12.RoutingPolicy + rp *admiralV1Alpha1.RoutingPolicy expectedEnv string }{ { @@ -564,15 +564,15 @@ func TestGetRoutingPolicyEnv(t *testing.T) { func TestGetGtpIdentity(t *testing.T) { - gtpIdentityFromLabels := v12.GlobalTrafficPolicy{} - gtpIdentityFromLabels.CreationTimestamp = v1.Now() + gtpIdentityFromLabels := admiralV1Alpha1.GlobalTrafficPolicy{} + gtpIdentityFromLabels.CreationTimestamp = metav1.Now() gtpIdentityFromLabels.Labels = map[string]string{"identity": "app1", "admiral.io/env": "stage1"} gtpIdentityFromLabels.Annotations = map[string]string{"admiral.io/env": "production"} gtpIdentityFromLabels.Namespace = "namespace" gtpIdentityFromLabels.Name = "myGTP" - gtpIdenityFromSelector := v12.GlobalTrafficPolicy{} - gtpIdenityFromSelector.CreationTimestamp = v1.Now() + gtpIdenityFromSelector := admiralV1Alpha1.GlobalTrafficPolicy{} + gtpIdenityFromSelector.CreationTimestamp = metav1.Now() gtpIdenityFromSelector.Labels = map[string]string{"admiral.io/env": "stage1", "env": "stage2"} gtpIdenityFromSelector.Spec.Selector = map[string]string{"identity": "app2", "admiral.io/env": "stage1", "env": "stage2"} gtpIdenityFromSelector.Namespace = "namespace" @@ -580,7 +580,7 @@ func TestGetGtpIdentity(t *testing.T) { testCases := []struct { name string - gtp *v12.GlobalTrafficPolicy + gtp 
*admiralV1Alpha1.GlobalTrafficPolicy expectedIdentity string }{ { @@ -607,23 +607,23 @@ func TestGetGtpIdentity(t *testing.T) { } func TestIsServiceMatch(t *testing.T) { - matchingSelector := v1.LabelSelector{} + matchingSelector := metav1.LabelSelector{} matchingSelector.MatchLabels = map[string]string{"app": "app1", "asset": "asset1"} matchingServiceSelector := map[string]string{"app": "app1", "asset": "asset1"} - nonMatchingSelector := v1.LabelSelector{} + nonMatchingSelector := metav1.LabelSelector{} nonMatchingSelector.MatchLabels = map[string]string{"app": "app1", "asset": "asset1"} nonMatchingServiceSelector := map[string]string{"app": "app2", "asset": "asset1"} - nilSelector := v1.LabelSelector{} + nilSelector := metav1.LabelSelector{} nonNilServiceSelector := map[string]string{"app": "app1", "asset": "asset1"} - nonNilSelector := v1.LabelSelector{} + nonNilSelector := metav1.LabelSelector{} nonNilSelector.MatchLabels = map[string]string{"app": "app1", "asset": "asset1"} nilServiceSelector := map[string]string{} testCases := []struct { name string - selector *v1.LabelSelector + selector *metav1.LabelSelector serviceSelector map[string]string expectedBool bool }{ @@ -664,7 +664,7 @@ func TestIsServiceMatch(t *testing.T) { } func TestGetRoutingPolicyIdentity(t *testing.T) { - rp := &admiralv1.RoutingPolicy{ + rp := &admiralV1Alpha1.RoutingPolicy{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ "routingPolicy": "test-policy", @@ -712,7 +712,7 @@ func TestGetRoutingPolicy(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - rp := &admiralv1.RoutingPolicy{ + rp := &admiralV1Alpha1.RoutingPolicy{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test-ns", Labels: tc.labels, @@ -778,23 +778,23 @@ func TestAppendError(t *testing.T) { func TestGetODIdentity(t *testing.T) { type args struct { - od *admiralv1.OutlierDetection + od *admiralV1Alpha1.OutlierDetection } - test1od := &admiralv1.OutlierDetection{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + test1od := &admiralV1Alpha1.OutlierDetection{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: model.OutlierDetection{}, - Status: v12.OutlierDetectionStatus{}, + Status: admiralV1Alpha1.OutlierDetectionStatus{}, } test1od.Labels = make(map[string]string) test1od.Labels["identity"] = "foo" - test2od := &admiralv1.OutlierDetection{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + test2od := &admiralV1Alpha1.OutlierDetection{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: model.OutlierDetection{}, - Status: v12.OutlierDetectionStatus{}, + Status: admiralV1Alpha1.OutlierDetectionStatus{}, } test2od.Labels = make(map[string]string) tests := []struct { @@ -814,40 +814,40 @@ func TestGetODIdentity(t *testing.T) { func TestGetODEnv(t *testing.T) { type args struct { - od *admiralv1.OutlierDetection + od *admiralV1Alpha1.OutlierDetection } - test1od := &admiralv1.OutlierDetection{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + test1od := &admiralV1Alpha1.OutlierDetection{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: model.OutlierDetection{}, - Status: v12.OutlierDetectionStatus{}, + Status: admiralV1Alpha1.OutlierDetectionStatus{}, } test1od.Labels = make(map[string]string) - test2od := &admiralv1.OutlierDetection{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + test2od := &admiralV1Alpha1.OutlierDetection{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: 
model.OutlierDetection{}, - Status: v12.OutlierDetectionStatus{}, + Status: admiralV1Alpha1.OutlierDetectionStatus{}, } test2od.Annotations = make(map[string]string) test2od.Annotations["admiral.io/env"] = "fooAnnotation" - test3od := &admiralv1.OutlierDetection{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + test3od := &admiralV1Alpha1.OutlierDetection{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: model.OutlierDetection{}, - Status: v12.OutlierDetectionStatus{}, + Status: admiralV1Alpha1.OutlierDetectionStatus{}, } test3od.Labels = make(map[string]string) test3od.Labels["admiral.io/env"] = "fooLabel" - test4od := &admiralv1.OutlierDetection{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + test4od := &admiralV1Alpha1.OutlierDetection{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: model.OutlierDetection{}, - Status: v12.OutlierDetectionStatus{}, + Status: admiralV1Alpha1.OutlierDetectionStatus{}, } selector := make(map[string]string) @@ -874,7 +874,7 @@ func TestGetODEnv(t *testing.T) { } func TestCheckIFEnvLabelIsPresentEnvValueEmpty(t *testing.T) { - tc := admiralv1.TrafficConfig{ + tc := admiralV1Alpha1.TrafficConfig{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"env": ""}, Annotations: map[string]string{ "asset": "123456", @@ -885,7 +885,7 @@ func TestCheckIFEnvLabelIsPresentEnvValueEmpty(t *testing.T) { } func TestCheckIFEnvLabelIsPresentLabelsMissing(t *testing.T) { - tc := admiralv1.TrafficConfig{ + tc := admiralV1Alpha1.TrafficConfig{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "asset": "123456", @@ -896,7 +896,7 @@ func TestCheckIFEnvLabelIsPresentLabelsMissing(t *testing.T) { } func TestCheckIFEnvLabelIsPresentSuccess(t *testing.T) { - tc := admiralv1.TrafficConfig{ + tc := admiralV1Alpha1.TrafficConfig{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"env": "qal"}, Annotations: map[string]string{ "asset": "123456", @@ -950,7 +950,7 @@ func TestCheckIFEnvLabelIsPresentSuccess(t *testing.T) { func TestGenerateTxId(t *testing.T) { type args struct { - meta v1.Object + meta metav1.Object ctrlName string id string } @@ -1010,6 +1010,141 @@ func TestGenerateTxId(t *testing.T) { } } +func TestGetMeshPorts(t *testing.T) { + var ( + annotatedPort = 8090 + annotatedSecondPort = 8091 + defaultServicePort = uint32(8080) + ports = map[string]uint32{"http": uint32(annotatedPort)} + portsDiffTargetPort = map[string]uint32{"http": uint32(80)} + grpcPorts = map[string]uint32{"grpc": uint32(annotatedPort)} + grpcWebPorts = map[string]uint32{"grpc-web": uint32(annotatedPort)} + http2Ports = map[string]uint32{"http2": uint32(annotatedPort)} + portsFromDefaultSvcPort = map[string]uint32{"http": defaultServicePort} + emptyPorts = map[string]uint32{} + defaultK8sSvcPortNoName = k8sCoreV1.ServicePort{Port: int32(defaultServicePort)} + defaultK8sSvcPort = k8sCoreV1.ServicePort{Name: "default", Port: int32(defaultServicePort)} + meshK8sSvcPort = k8sCoreV1.ServicePort{Name: "mesh", Port: int32(annotatedPort)} + serviceMeshPorts = []k8sCoreV1.ServicePort{defaultK8sSvcPort, meshK8sSvcPort} + serviceMeshPortsOnlyDefault = []k8sCoreV1.ServicePort{defaultK8sSvcPortNoName} + service = k8sCoreV1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, + Spec: k8sCoreV1.ServiceSpec{Ports: serviceMeshPorts}, + } + deployment = k8sAppsV1.Deployment{ + Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ + ObjectMeta: 
metav1.ObjectMeta{Annotations: map[string]string{SidecarEnabledPorts: strconv.Itoa(annotatedPort)}}, + }}} + deploymentWithMultipleMeshPorts = k8sAppsV1.Deployment{ + Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{SidecarEnabledPorts: strconv.Itoa(annotatedPort) + "," + strconv.Itoa(annotatedSecondPort)}}, + }}} + ) + + testCases := []struct { + name string + clusterName string + service k8sCoreV1.Service + deployment k8sAppsV1.Deployment + expected map[string]uint32 + }{ + { + name: "should return a port based on annotation", + service: service, + deployment: deployment, + expected: ports, + }, + { + name: "should return a http port if no port name is specified", + service: k8sCoreV1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, + Spec: k8sCoreV1.ServiceSpec{Ports: []k8sCoreV1.ServicePort{{Port: int32(80), TargetPort: intstr.FromInt(annotatedPort)}}}, + }, + deployment: deployment, + expected: portsDiffTargetPort, + }, + { + name: "should return a http port if the port name doesn't start with a protocol name", + service: k8sCoreV1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, + Spec: k8sCoreV1.ServiceSpec{Ports: []k8sCoreV1.ServicePort{{Name: "hello-grpc", Port: int32(annotatedPort)}}}, + }, + deployment: deployment, + expected: ports, + }, + { + name: "should return a grpc port based on annotation", + service: k8sCoreV1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, + Spec: k8sCoreV1.ServiceSpec{Ports: []k8sCoreV1.ServicePort{{Name: "grpc-service", Port: int32(annotatedPort)}}}, + }, + deployment: deployment, + expected: grpcPorts, + }, + { + name: "should return a grpc-web port based on annotation", + service: k8sCoreV1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, + Spec: k8sCoreV1.ServiceSpec{Ports: []k8sCoreV1.ServicePort{{Name: "grpc-web", Port: int32(annotatedPort)}}}, + }, + deployment: deployment, + expected: grpcWebPorts, + }, + { + name: "should return a http2 port based on annotation", + service: k8sCoreV1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, + Spec: k8sCoreV1.ServiceSpec{Ports: []k8sCoreV1.ServicePort{{Name: "http2", Port: int32(annotatedPort)}}}, + }, + deployment: deployment, + expected: http2Ports, + }, + { + name: "should return a default port", + service: k8sCoreV1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, + Spec: k8sCoreV1.ServiceSpec{Ports: serviceMeshPortsOnlyDefault}, + }, + deployment: k8sAppsV1.Deployment{ + Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}, + }}}, + expected: portsFromDefaultSvcPort, + }, + { + name: "should return empty ports", + service: k8sCoreV1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, + Spec: k8sCoreV1.ServiceSpec{Ports: nil}, + }, + deployment: k8sAppsV1.Deployment{ + Spec: k8sAppsV1.DeploymentSpec{Template: k8sCoreV1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}, + }}}, + 
expected: emptyPorts, + }, + { + name: "should return a http port if the port name doesn't start with a protocol name", + service: k8sCoreV1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, + Spec: k8sCoreV1.ServiceSpec{Ports: []k8sCoreV1.ServicePort{{Name: "http", Port: int32(annotatedPort)}, + {Name: "grpc", Port: int32(annotatedSecondPort)}}}, + }, + deployment: deploymentWithMultipleMeshPorts, + expected: ports, + }, + } + + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + meshPorts := GetMeshPortsForDeployments(c.clusterName, &c.service, &c.deployment) + if !reflect.DeepEqual(meshPorts, c.expected) { + t.Errorf("Wanted meshPorts: %v, got: %v", c.expected, meshPorts) + } + }) + } +} + func TestGetGtpIdentityPartition(t *testing.T) { initConfig(true, true) partitionIdentifier := "admiral.io/identityPartition" @@ -1017,22 +1152,22 @@ func TestGetGtpIdentityPartition(t *testing.T) { testCases := []struct { name string - gtp v12.GlobalTrafficPolicy + gtp admiralV1Alpha1.GlobalTrafficPolicy expected string }{ { name: "should return valid identifier from label", - gtp: v12.GlobalTrafficPolicy{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}, + gtp: admiralV1Alpha1.GlobalTrafficPolicy{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}, expected: identifierVal, }, { name: "should return valid identifier from annotations", - gtp: v12.GlobalTrafficPolicy{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}, + gtp: admiralV1Alpha1.GlobalTrafficPolicy{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}, expected: identifierVal, }, { name: "should return empty identifier", - gtp: v12.GlobalTrafficPolicy{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}, Annotations: map[string]string{}}}, + gtp: admiralV1Alpha1.GlobalTrafficPolicy{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}, Annotations: map[string]string{}}}, expected: "", }, } diff --git a/admiral/pkg/controller/util/migration.go b/admiral/pkg/controller/util/migration.go index 0357afeb..d5087b72 100644 --- a/admiral/pkg/controller/util/migration.go +++ b/admiral/pkg/controller/util/migration.go @@ -13,13 +13,14 @@ import ( func UpdateEndpointsForDeployToRolloutMigration(serviceInstance map[string]*k8sV1.Service, serviceEntry *networking.ServiceEntry, meshPorts map[string]map[string]uint32, clusterIngress string, clusterAppDeleteMap map[string]string, clusterName string, - clusterDeployRolloutPresent map[string]map[string]bool) error { + clusterDeployRolloutPresent map[string]map[string]bool) ([]*k8sV1.Service, error) { if serviceInstance[common.Deployment] == nil || serviceInstance[common.Rollout] == nil { - return fmt.Errorf("serviceInstance for Deployment/Rollout is nil as the service cache has not updated yet") + return nil, fmt.Errorf("serviceInstance for Deployment/Rollout is nil as the service cache has not updated yet") } deployLocalFqdn := serviceInstance[common.Deployment].Name + common.Sep + serviceInstance[common.Deployment].Namespace + common.GetLocalDomainSuffix() rolloutFqdn := serviceInstance[common.Rollout].Name + common.Sep + serviceInstance[common.Rollout].Namespace + common.GetLocalDomainSuffix() + var requiredServices []*k8sV1.Service var uniqueEndpointsList []*networking.WorkloadEntry for _, 
ep := range serviceEntry.Endpoints { @@ -36,6 +37,7 @@ func UpdateEndpointsForDeployToRolloutMigration(serviceInstance map[string]*k8sV Labels: map[string]string{"type": common.Deployment}, } uniqueEndpointsList = append(uniqueEndpointsList, deployEp) + requiredServices = append(requiredServices, serviceInstance[common.Deployment]) } if clusterAppDeleteMap[clusterName] != common.Rollout && clusterDeployRolloutPresent[clusterName][common.Rollout] { @@ -46,8 +48,11 @@ func UpdateEndpointsForDeployToRolloutMigration(serviceInstance map[string]*k8sV Labels: map[string]string{"type": common.Rollout}, } uniqueEndpointsList = append(uniqueEndpointsList, rolloutEp) + requiredServices = append(requiredServices, serviceInstance[common.Rollout]) } } else { + // TODO: check when will this be applicable, and then + // update the required service accordingly ep.Labels = nil uniqueEndpointsList = append(uniqueEndpointsList, ep) } @@ -55,5 +60,5 @@ func UpdateEndpointsForDeployToRolloutMigration(serviceInstance map[string]*k8sV serviceEntry.Endpoints = uniqueEndpointsList - return nil + return requiredServices, nil } diff --git a/admiral/pkg/controller/util/migration_test.go b/admiral/pkg/controller/util/migration_test.go index aee81971..fde978bb 100644 --- a/admiral/pkg/controller/util/migration_test.go +++ b/admiral/pkg/controller/util/migration_test.go @@ -260,7 +260,7 @@ func TestUpdateEndpointsForDeployToRolloutMigration(t *testing.T) { for _, c := range testCases { t.Run(c.name, func(t *testing.T) { - err := UpdateEndpointsForDeployToRolloutMigration(c.serviceInstance, c.serviceEntry, meshPorts, c.clusterIngress, c.clusterAppDeleteMap, clusterName, c.clusterDeployRolloutPresent) + _, err := UpdateEndpointsForDeployToRolloutMigration(c.serviceInstance, c.serviceEntry, meshPorts, c.clusterIngress, c.clusterAppDeleteMap, clusterName, c.clusterDeployRolloutPresent) assert.Equal(t, c.expectedErr, err) if err == nil { if !reflect.DeepEqual(c.expectedSeEndpoints, c.serviceEntry.Endpoints) { diff --git a/admiral/pkg/controller/util/util.go b/admiral/pkg/controller/util/util.go index e5ad56ce..2164c4eb 100644 --- a/admiral/pkg/controller/util/util.go +++ b/admiral/pkg/controller/util/util.go @@ -57,16 +57,16 @@ func LogElapsedTimeController(logger *log.Entry, logMessage string) func() { } } -func LogElapsedTimeForModifySE(logger *log.Entry, op, name, namespace, cluster, message string) func() { +func LogElapsedTimeForTask(logger *log.Entry, op, name, namespace, cluster, message string) func() { start := time.Now() return func() { - LogElapsedTimeSinceForModifySE(logger, op, name, namespace, cluster, message, start) + LogElapsedTimeSinceTask(logger, op, name, namespace, cluster, message, start) } } -func LogElapsedTimeSinceForModifySE(logger *log.Entry, op, name, namespace, cluster, message string, start time.Time) { - // op=%v name=%v namespace=%s cluster=%s message=%v txId=%v - logger.Infof(common.CtxLogFormatWithTime, op, name, namespace, cluster, message, time.Since(start).Milliseconds()) +func LogElapsedTimeSinceTask(logger *log.Entry, task, name, namespace, cluster, message string, start time.Time) { + // task=%v name=%v namespace=%s cluster=%s message=%v txId=%v + logger.Infof(common.CtxLogFormatWithTime, task, name, namespace, cluster, message, time.Since(start).Milliseconds()) } func LogElapsedTimeSince(op, identity, env, clusterId string, start time.Time) { diff --git a/admiral/pkg/mocks/ConfigWriter.go b/admiral/pkg/mocks/ConfigWriter.go new file mode 100644 index 00000000..146725f9 --- /dev/null 
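`UpdateEndpointsForDeployToRolloutMigration` now returns the Services backing the endpoints it kept, in addition to mutating `serviceEntry.Endpoints` in place, so callers can carry those Services into per-environment registry config. A caller sketch; the wrapper name and package placement are hypothetical:

```go
package clusters // hypothetical caller location

import (
	networking "istio.io/api/networking/v1alpha3"
	k8sV1 "k8s.io/api/core/v1"

	"github.com/istio-ecosystem/admiral/admiral/pkg/controller/util"
)

// syncMigrationServices shows the new two-value contract: the pruned
// endpoint list is written back onto serviceEntry, and the Services that
// back the surviving endpoints are returned for further processing.
func syncMigrationServices(
	serviceInstance map[string]*k8sV1.Service,
	serviceEntry *networking.ServiceEntry,
	meshPorts map[string]map[string]uint32,
	clusterIngress, clusterName string,
	clusterAppDeleteMap map[string]string,
	clusterDeployRolloutPresent map[string]map[string]bool,
) ([]*k8sV1.Service, error) {
	return util.UpdateEndpointsForDeployToRolloutMigration(
		serviceInstance, serviceEntry, meshPorts, clusterIngress,
		clusterAppDeleteMap, clusterName, clusterDeployRolloutPresent)
}
```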
+++ b/admiral/pkg/mocks/ConfigWriter.go @@ -0,0 +1,52 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + context "context" + + clusters "github.com/istio-ecosystem/admiral/admiral/pkg/clusters" + + logrus "github.com/sirupsen/logrus" + + mock "github.com/stretchr/testify/mock" + + v1alpha3 "istio.io/api/networking/v1alpha3" +) + +// ConfigWriter is an autogenerated mock type for the ConfigWriter type +type ConfigWriter struct { + mock.Mock +} + +// AddServiceEntriesWithDrToAllCluster provides a mock function with given fields: ctxLogger, ctx, rr, sourceClusters, serviceEntries, isAdditionalEndpointsEnabled, isServiceEntryModifyCalledForSourceCluster, identityId, env +func (_m *ConfigWriter) AddServiceEntriesWithDrToAllCluster(ctxLogger *logrus.Entry, ctx context.Context, rr *clusters.RemoteRegistry, sourceClusters map[string]string, serviceEntries map[string]*v1alpha3.ServiceEntry, isAdditionalEndpointsEnabled bool, isServiceEntryModifyCalledForSourceCluster bool, identityId string, env string) error { + ret := _m.Called(ctxLogger, ctx, rr, sourceClusters, serviceEntries, isAdditionalEndpointsEnabled, isServiceEntryModifyCalledForSourceCluster, identityId, env) + + if len(ret) == 0 { + panic("no return value specified for AddServiceEntriesWithDrToAllCluster") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*logrus.Entry, context.Context, *clusters.RemoteRegistry, map[string]string, map[string]*v1alpha3.ServiceEntry, bool, bool, string, string) error); ok { + r0 = rf(ctxLogger, ctx, rr, sourceClusters, serviceEntries, isAdditionalEndpointsEnabled, isServiceEntryModifyCalledForSourceCluster, identityId, env) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewConfigWriter creates a new instance of ConfigWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewConfigWriter(t interface { + mock.TestingT + Cleanup(func()) +}) *ConfigWriter { + mock := &ConfigWriter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/admiral/pkg/registry/clusterdentity_test.go b/admiral/pkg/registry/clusterIdentity_test.go similarity index 100% rename from admiral/pkg/registry/clusterdentity_test.go rename to admiral/pkg/registry/clusterIdentity_test.go diff --git a/admiral/pkg/registry/config.go b/admiral/pkg/registry/config.go new file mode 100644 index 00000000..10879d5e --- /dev/null +++ b/admiral/pkg/registry/config.go @@ -0,0 +1,58 @@ +package registry + +import ( + admiralV1Alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" + networking "istio.io/api/networking/v1alpha3" +) + +type IdentityConfig struct { + IdentityName string `json:"identityName"` + Clusters map[string]*IdentityConfigCluster `json:"clusters"` + ClientAssets map[string]string `json:"clientAssets"` +} + +func (config *IdentityConfig) PutClusterConfig(name string, clusterConfig IdentityConfigCluster) error { + return nil +} + +type IdentityConfigCluster struct { + Name string `json:"name"` + Locality string `json:"locality"` + IngressEndpoint string `json:"ingressEndpoint"` + IngressPort string `json:"ingressPort"` + IngressPortName string `json:"ingressPortName"` + Environment map[string]*IdentityConfigEnvironment `json:"environment"` +} + +func (config *IdentityConfigCluster) PutEnvironment(name string, environmentConfig IdentityConfigEnvironment) error { + return nil +} + +func (config *IdentityConfigCluster) PutClientAssets(clientAssets []string) error { + return nil +} + +type RegistryServiceConfig struct { + Name string `json:"name"` + Weight int `json:"weight"` + Ports map[string]uint32 `json:"ports"` +} + +type TrafficPolicy struct { + GlobalTrafficPolicy admiralV1Alpha1.GlobalTrafficPolicy `json:"globaltrafficpolicy"` + OutlierDetection admiralV1Alpha1.OutlierDetection `json:"outlierdetection"` + ClientConnectionConfig admiralV1Alpha1.ClientConnectionConfig `json:"clientconnectionconfig"` +} + +type IdentityConfigEnvironment struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + Services map[string]*RegistryServiceConfig `json:"services"` + ServiceName string `json:"serviceName"` + Type string `json:"type"` + Selectors map[string]string `json:"selectors"` + Ports []*networking.ServicePort `json:"ports"` + TrafficPolicy TrafficPolicy `json:"trafficPolicy"` + Event admiral.EventType `json:"event"` +} diff --git a/admiral/pkg/registry/configCache.go b/admiral/pkg/registry/configCache.go new file mode 100644 index 00000000..dc9e41eb --- /dev/null +++ b/admiral/pkg/registry/configCache.go @@ -0,0 +1,72 @@ +package registry + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strings" + "sync" + + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" +) + +type IdentityConfigCache interface { + Get(name string) (*IdentityConfig, error) + Update(name string, config *IdentityConfig, writer ConfigWriter) error + Remove(name string) error +} +type cacheHandler struct { + lock *sync.RWMutex + cache map[string]*IdentityConfig +} + +func NewConfigCache() *cacheHandler { + return &cacheHandler{ + lock: &sync.RWMutex{}, + cache: map[string]*IdentityConfig{}, + } +} + +func (handler *cacheHandler) Get(identityName string) (*IdentityConfig, error) { + defer handler.lock.RUnlock() + handler.lock.RLock() 
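The new `registry` types mirror the map-keyed JSON testdata one-to-one. A hand-built `IdentityConfig` equivalent to a slice of `sampleIdentityConfiguration.json`, using only fields shown in `config.go`:

```go
package main

import (
	"fmt"

	"github.com/istio-ecosystem/admiral/admiral/pkg/registry"
)

func main() {
	cfg := &registry.IdentityConfig{
		IdentityName: "sample",
		Clusters: map[string]*registry.IdentityConfigCluster{
			"cluster1": {
				Name:            "cluster1",
				Locality:        "us-west-2",
				IngressEndpoint: "abc-elb.us-west-2.elb.amazonaws.com.",
				IngressPort:     "15443",
				IngressPortName: "http",
				Environment: map[string]*registry.IdentityConfigEnvironment{
					"prf": {
						Name:        "prf",
						Namespace:   "ns-1-usw2-prf",
						ServiceName: "app-1-spk-root-service",
						Type:        "rollout",
						Selectors:   map[string]string{"app": "app-1"},
						Services: map[string]*registry.RegistryServiceConfig{
							"app-1-spk-root-service": {
								Name:   "app-1-spk-root-service",
								Weight: -1,
								Ports:  map[string]uint32{"http": 8090},
							},
						},
					},
				},
			},
		},
		ClientAssets: map[string]string{"sample": "sample"},
	}
	fmt.Println(len(cfg.Clusters), "cluster(s) for", cfg.IdentityName)
}
```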
+	config, ok := handler.cache[identityName]
+	if ok {
+		return config, nil
+	}
+	return nil, fmt.Errorf("record not found")
+}
+
+func (handler *cacheHandler) Update(identityName string, config *IdentityConfig, writer ConfigWriter) error {
+	defer handler.lock.Unlock()
+	handler.lock.Lock()
+	handler.cache[identityName] = config
+	return writeToFileLocal(writer, config)
+}
+
+// writeToFileLocal serializes the config and hands it to the writer; the
+// destination path depends on whether the service-registry secret filter tag is set
+func writeToFileLocal(writer ConfigWriter, config *IdentityConfig) error {
+	fmt.Printf("[state syncer] writing config to file assetname=%s\n", config.IdentityName)
+	shortAlias := strings.Split(config.IdentityName, ".")
+	currentDir, err := filepath.Abs("./")
+	if err != nil {
+		return err
+	}
+	pathName := currentDir + "/testdata/" + shortAlias[len(shortAlias)-1] + "IdentityConfiguration.json"
+	if common.GetSecretFilterTags() == secretLabel {
+		pathName = "/etc/serviceregistry/config/" + shortAlias[len(shortAlias)-1] + "IdentityConfiguration.json"
+	}
+	fmt.Printf("[state syncer] file path=%s\n", pathName)
+	data, err := json.MarshalIndent(config, "", "  ")
+	if err != nil {
+		return err
+	}
+	return writer.Write(pathName, data)
+}
+
+func (handler *cacheHandler) Remove(identityName string) error {
+	defer handler.lock.Unlock()
+	handler.lock.Lock()
+	_, ok := handler.cache[identityName]
+	if ok {
+		delete(handler.cache, identityName)
+		return nil
+	}
+	return fmt.Errorf("config for %s does not exist", identityName)
+}
diff --git a/admiral/pkg/registry/configCache_test.go b/admiral/pkg/registry/configCache_test.go
new file mode 100644
index 00000000..19c8ed09
--- /dev/null
+++ b/admiral/pkg/registry/configCache_test.go
@@ -0,0 +1,152 @@
+package registry
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/sirupsen/logrus"
+)
+
+func TestGet(t *testing.T) {
+	var (
+		record1       = "record1"
+		record2       = "record2"
+		record2Config = &IdentityConfig{
+			IdentityName: record2,
+		}
+		emptyHandler       = NewConfigCache()
+		handlerWithRecord2 = NewConfigCache()
+	)
+	handlerWithRecord2.Update(record2, record2Config, NewConfigMapWriter(nil, logrus.WithFields(logrus.Fields{
+		"type": "storeWorkloadData",
+	})))
+	cases := []struct {
+		name         string
+		identityName string
+		handler      *cacheHandler
+		expConfig    *IdentityConfig
+		expErr       error
+	}{
+		{
+			name: "Given there is no record for '" + record1 + "', " +
+				"When the Get function is called, " +
+				"It should return an error",
+			identityName: record1,
+			handler:      emptyHandler,
+			expErr:       fmt.Errorf("record not found"),
+		},
+		{
+			name: "Given there is a record for '" + record2 + "', " +
+				"When the Get function is called, " +
+				"It should return the record",
+			identityName: record2,
+			handler:      handlerWithRecord2,
+			expConfig:    record2Config,
+			expErr:       nil,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			config, err := c.handler.Get(c.identityName)
+			if !reflect.DeepEqual(err, c.expErr) {
+				t.Errorf("want=%v, got=%v", c.expErr, err)
+			}
+			if !reflect.DeepEqual(config, c.expConfig) {
+				t.Errorf("want=%v, got=%v", c.expConfig, config)
+			}
+		})
+	}
+}
+
+func TestUpdate(t *testing.T) {
+	var (
+		record1       = "record1"
+		record1Config = &IdentityConfig{
+			IdentityName: record1,
+		}
+		emptyHandler = NewConfigCache()
+	)
+	cases := []struct {
+		name         string
+		identityName string
+		handler      *cacheHandler
+		config       *IdentityConfig
+		expErr       error
+	}{
+		{
+			name: "Given there is no record for '" + record1 + "', " +
+				"When the Update function is called, " +
+				"It should cache the config",
+			identityName: record1,
+			config:       record1Config,
+			handler:      emptyHandler,
+			expErr:       nil,
+		},
+	}
+
+	for _, c := range 
cases { + t.Run(c.name, func(t *testing.T) { + err := c.handler.Update(c.identityName, c.config, NewConfigMapWriter(nil, logrus.WithFields(logrus.Fields{ + "type": "storeWorkloadData", + }))) + if !reflect.DeepEqual(err, c.expErr) { + t.Errorf("want=%v, got=%v", c.expErr, err) + } + config, err := c.handler.Get(c.identityName) + if err != nil { + t.Errorf("want=nil, got=%v", err) + } + if !reflect.DeepEqual(config, c.config) { + t.Errorf("want=%v, got=%v", c.config, config) + } + }) + } +} + +func TestRemove(t *testing.T) { + var ( + record1 = "record1" + record2 = "record2" + record2Config = &IdentityConfig{ + IdentityName: record2, + } + emptyHandler = NewConfigCache() + handlerWithRecord2 = NewConfigCache() + ) + handlerWithRecord2.Update(record2, record2Config, NewConfigMapWriter(nil, logrus.WithFields(logrus.Fields{ + "type": "storeWorkloadData", + }))) + cases := []struct { + name string + identityName string + handler *cacheHandler + expErr error + }{ + { + name: "Given there is no record for '" + record1 + "', " + + "When the Remove function is called, " + + "It should return an error", + identityName: record1, + handler: emptyHandler, + expErr: fmt.Errorf("config for %s does not exist", record1), + }, + { + name: "Given there is a record for '" + record2 + "', " + + "When the Remove function is called, " + + "It should remove the record", + identityName: record2, + handler: handlerWithRecord2, + expErr: nil, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + err := c.handler.Remove(c.identityName) + if !reflect.DeepEqual(err, c.expErr) { + t.Errorf("want=%v, got=%v", c.expErr, err) + } + }) + } +} diff --git a/admiral/pkg/registry/configSyncer.go b/admiral/pkg/registry/configSyncer.go index df0e03c7..a21c656c 100644 --- a/admiral/pkg/registry/configSyncer.go +++ b/admiral/pkg/registry/configSyncer.go @@ -1,40 +1,88 @@ package registry -type ConfigSyncer interface { - SyncDeployment() error - SyncService() error - - // argo custom resources - SyncArgoRollout() error +import ( + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/util" + "github.com/sirupsen/logrus" + "sync" - // admiral custom resources - SyncGlobalTrafficPolicy() error - SyncClientConnectionConfigurations() error - SyncOutlierDetectionConfigurations() error -} + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" +) -type configSync struct{} +const ( + serviceRegistryIdentityConfigMapName = "service-registry-identityconfig" + admiralQaNs = "services-admiral-use2-qal" + secretLabel = "admiral/syncrtay" +) -func NewConfigSync() *configSync { - return &configSync{} +type ConfigSyncer interface { + UpdateEnvironmentConfigByCluster(ctxLogger *logrus.Entry, environment, cluster string, config IdentityConfig, writer ConfigWriter) error } -func (c *configSync) SyncDeployment() error { - return nil +type configSync struct { + cache map[string]IdentityConfigCache + lock *sync.RWMutex } -func (c *configSync) SyncService() error { - return nil +func NewConfigSync() ConfigSyncer { + return &configSync{ + lock: &sync.RWMutex{}, + cache: make(map[string]IdentityConfigCache), + } } -func (c *configSync) SyncArgoRollout() error { - return nil -} -func (c *configSync) SyncGlobalTrafficPolicy() error { - return nil -} -func (c *configSync) SyncClientConnectionConfigurations() error { - return nil -} -func (c *configSync) SyncOutlierDetectionConfigurations() error { - return nil + +func (c *configSync) UpdateEnvironmentConfigByCluster( + ctxLogger *logrus.Entry, + environment, + 
cluster string,
+	config IdentityConfig,
+	writer ConfigWriter) error {
+	var (
+		task          = "UpdateEnvironmentConfigByCluster"
+		clusterConfig = config.Clusters[cluster]
+	)
+	defer util.LogElapsedTimeForTask(ctxLogger, task, config.IdentityName, "", "", "processingTime")()
+	ctxLogger.Infof(common.CtxLogFormat, task, config.IdentityName, "", "", "received")
+	if clusterConfig == nil {
+		// guard against a config that does not carry the requested cluster
+		ctxLogger.Warnf(common.CtxLogFormat, task, config.IdentityName, "", cluster, "no configuration for this cluster, skipping")
+		return nil
+	}
+	defer c.lock.Unlock()
+	c.lock.Lock()
+	cache, ok := c.cache[config.IdentityName]
+	if ok {
+		ctxLogger.Infof(common.CtxLogFormat, task, config.IdentityName, "", cluster, "cache found")
+		// update cluster configuration
+		configCache, err := cache.Get(config.IdentityName)
+		if err != nil {
+			ctxLogger.Errorf(common.CtxLogFormat, task, config.IdentityName, "", cluster, "config not found in cache")
+			return err
+		}
+		if configCache.Clusters[cluster] != nil {
+			configCache.Clusters[cluster].IngressEndpoint = clusterConfig.IngressEndpoint
+			configCache.Clusters[cluster].IngressPort = clusterConfig.IngressPort
+			configCache.Clusters[cluster].IngressPortName = clusterConfig.IngressPortName
+			configCache.Clusters[cluster].Locality = clusterConfig.Locality
+			configCache.Clusters[cluster].Name = clusterConfig.Name
+			if configCache.Clusters[cluster].Environment == nil {
+				configCache.Clusters[cluster].Environment = map[string]*IdentityConfigEnvironment{}
+			}
+			configCache.Clusters[cluster].Environment[environment] = config.Clusters[cluster].Environment[environment]
+			ctxLogger.Infof(common.CtxLogFormat, task, config.IdentityName, "", cluster, "updating the cache")
+			return cache.Update(config.IdentityName, configCache, writer)
+		}
+		// cluster doesn't exist in the cached config; add it with only this environment
+		configCache.Clusters[cluster] = &IdentityConfigCluster{
+			Name:            clusterConfig.Name,
+			Locality:        clusterConfig.Locality,
+			IngressEndpoint: clusterConfig.IngressEndpoint,
+			IngressPort:     clusterConfig.IngressPort,
+			IngressPortName: clusterConfig.IngressPortName,
+			Environment: map[string]*IdentityConfigEnvironment{
+				environment: clusterConfig.Environment[environment],
+			},
+		}
+		ctxLogger.Infof(common.CtxLogFormat, task, config.IdentityName, "", cluster, "updating the cache")
+		return cache.Update(config.IdentityName, configCache, writer)
+	}
+	ctxLogger.Infof(common.CtxLogFormat, task, config.IdentityName, "", cluster, "creating new cache")
+	// entry doesn't exist
+	c.cache[config.IdentityName] = NewConfigCache()
+	ctxLogger.Infof(common.CtxLogFormat, task, config.IdentityName, "", cluster, "updating the cache")
+	return c.cache[config.IdentityName].Update(config.IdentityName, &config, writer)
+}
diff --git a/admiral/pkg/registry/configWriter.go b/admiral/pkg/registry/configWriter.go
new file mode 100644
index 00000000..25d964a0
--- /dev/null
+++ b/admiral/pkg/registry/configWriter.go
@@ -0,0 +1,85 @@
+package registry
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+	"github.com/istio-ecosystem/admiral/admiral/pkg/controller/util"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	k8sV1 "k8s.io/api/core/v1"
+	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+	metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+type ConfigWriter interface {
+	Write(name string, data []byte) error
+}
+
+func NewConfigMapWriter(k8sClient kubernetes.Interface, ctxLogger *logrus.Entry) ConfigWriter {
+	return &configMapWriter{
+		k8sClient: k8sClient,
+		ctxLogger: ctxLogger,
+	}
+}
+
+type configMapWriter struct {
+	ctxLogger *logrus.Entry
+	k8sClient kubernetes.Interface
+}
+
+func (c *configMapWriter) 
Write(name string, data []byte) error {
+	var (
+		task = "ConfigMapWriter"
+		cm   *k8sV1.ConfigMap
+		err  error
+	)
+	defer util.LogElapsedTimeForTask(c.ctxLogger, task, name, "", "", "processingTime")()
+	if common.GetSecretFilterTags() != secretLabel {
+		c.ctxLogger.Infof(common.CtxLogFormat, task, name, "", "", "writing to local file")
+		return writeToFile(name, data)
+	}
+	nameParts := strings.Split(name, "/")
+	configMapDataKeyName := nameParts[len(nameParts)-1]
+	c.ctxLogger.Infof(common.CtxLogFormat, task, name, "", "", "updating configmap="+serviceRegistryIdentityConfigMapName+" key="+configMapDataKeyName)
+	exists := true
+	cm, err = c.k8sClient.CoreV1().ConfigMaps(admiralQaNs).Get(context.TODO(), serviceRegistryIdentityConfigMapName, metaV1.GetOptions{})
+	if err != nil {
+		if !k8sErrors.IsNotFound(err) {
+			c.ctxLogger.Warnf(common.CtxLogFormat, task, name, "", "", "error getting configmap, error="+err.Error())
+			return errors.Wrap(err, "failed to get configMap")
+		}
+		// configmap doesn't exist yet: build one and create it instead of updating
+		exists = false
+		c.ctxLogger.Infof(common.CtxLogFormat, task, name, "", "", "configmap doesn't exist, creating one")
+		cm = &k8sV1.ConfigMap{
+			ObjectMeta: metaV1.ObjectMeta{
+				Name:      serviceRegistryIdentityConfigMapName,
+				Namespace: admiralQaNs,
+			},
+		}
+	}
+	if cm.Data == nil {
+		// guard against a nil data map on a freshly created or empty configmap
+		cm.Data = map[string]string{}
+	}
+	cm.Data[configMapDataKeyName] = string(data)
+	if exists {
+		_, err = c.k8sClient.CoreV1().ConfigMaps(admiralQaNs).Update(context.TODO(), cm, metaV1.UpdateOptions{})
+	} else {
+		_, err = c.k8sClient.CoreV1().ConfigMaps(admiralQaNs).Create(context.TODO(), cm, metaV1.CreateOptions{})
+	}
+	if err != nil {
+		c.ctxLogger.Warnf(common.CtxLogFormat, task, name, "", "", "error writing configmap, error="+err.Error())
+		return errors.Wrap(err, "failed to write configMap")
+	}
+	return nil
+}
+
+func writeToFile(name string, data []byte) error {
+	f, err := os.Create(name)
+	if err != nil {
+		fmt.Printf("[state syncer] err=%v\n", err)
+		return err
+	}
+	defer f.Close()
+	if _, err = f.Write(data); err != nil {
+		fmt.Printf("[state syncer] err=%v\n", err)
+	}
+	return err
+}
diff --git a/admiral/pkg/registry/mocks/mock_ConfigSyncer.go b/admiral/pkg/registry/mocks/mock_ConfigSyncer.go
new file mode 100644
index 00000000..6846e5ec
--- /dev/null
+++ b/admiral/pkg/registry/mocks/mock_ConfigSyncer.go
@@ -0,0 +1,87 @@
+// Code generated by mockery v2.44.1. DO NOT EDIT. 
+ +package mocks + +import ( + logrus "github.com/sirupsen/logrus" + mock "github.com/stretchr/testify/mock" + + registry "github.com/istio-ecosystem/admiral/admiral/pkg/registry" +) + +// MockConfigSyncer is an autogenerated mock type for the ConfigSyncer type +type MockConfigSyncer struct { + mock.Mock +} + +type MockConfigSyncer_Expecter struct { + mock *mock.Mock +} + +func (_m *MockConfigSyncer) EXPECT() *MockConfigSyncer_Expecter { + return &MockConfigSyncer_Expecter{mock: &_m.Mock} +} + +// UpdateEnvironmentConfigByCluster provides a mock function with given fields: ctxLogger, environment, cluster, config, writer +func (_m *MockConfigSyncer) UpdateEnvironmentConfigByCluster(ctxLogger *logrus.Entry, environment string, cluster string, config registry.IdentityConfig, writer registry.ConfigWriter) error { + ret := _m.Called(ctxLogger, environment, cluster, config, writer) + + if len(ret) == 0 { + panic("no return value specified for UpdateEnvironmentConfigByCluster") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*logrus.Entry, string, string, registry.IdentityConfig, registry.ConfigWriter) error); ok { + r0 = rf(ctxLogger, environment, cluster, config, writer) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockConfigSyncer_UpdateEnvironmentConfigByCluster_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEnvironmentConfigByCluster' +type MockConfigSyncer_UpdateEnvironmentConfigByCluster_Call struct { + *mock.Call +} + +// UpdateEnvironmentConfigByCluster is a helper method to define mock.On call +// - ctxLogger *logrus.Entry +// - environment string +// - cluster string +// - config registry.IdentityConfig +// - writer registry.ConfigWriter +func (_e *MockConfigSyncer_Expecter) UpdateEnvironmentConfigByCluster(ctxLogger interface{}, environment interface{}, cluster interface{}, config interface{}, writer interface{}) *MockConfigSyncer_UpdateEnvironmentConfigByCluster_Call { + return &MockConfigSyncer_UpdateEnvironmentConfigByCluster_Call{Call: _e.mock.On("UpdateEnvironmentConfigByCluster", ctxLogger, environment, cluster, config, writer)} +} + +func (_c *MockConfigSyncer_UpdateEnvironmentConfigByCluster_Call) Run(run func(ctxLogger *logrus.Entry, environment string, cluster string, config registry.IdentityConfig, writer registry.ConfigWriter)) *MockConfigSyncer_UpdateEnvironmentConfigByCluster_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*logrus.Entry), args[1].(string), args[2].(string), args[3].(registry.IdentityConfig), args[4].(registry.ConfigWriter)) + }) + return _c +} + +func (_c *MockConfigSyncer_UpdateEnvironmentConfigByCluster_Call) Return(_a0 error) *MockConfigSyncer_UpdateEnvironmentConfigByCluster_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockConfigSyncer_UpdateEnvironmentConfigByCluster_Call) RunAndReturn(run func(*logrus.Entry, string, string, registry.IdentityConfig, registry.ConfigWriter) error) *MockConfigSyncer_UpdateEnvironmentConfigByCluster_Call { + _c.Call.Return(run) + return _c +} + +// NewMockConfigSyncer creates a new instance of MockConfigSyncer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockConfigSyncer(t interface { + mock.TestingT + Cleanup(func()) +}) *MockConfigSyncer { + mock := &MockConfigSyncer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/admiral/pkg/registry/registry.go b/admiral/pkg/registry/registry.go index 64e9125b..e566262e 100644 --- a/admiral/pkg/registry/registry.go +++ b/admiral/pkg/registry/registry.go @@ -2,14 +2,12 @@ package registry import ( "encoding/json" - "os" + "strings" - "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" log "github.com/sirupsen/logrus" - coreV1 "k8s.io/api/core/v1" ) // IdentityConfiguration is an interface to fetch configuration from a registry @@ -39,37 +37,6 @@ func WithRegistryEndpoint(registryEndpoint string) func(*registryClient) { } } -type IdentityConfig struct { - IdentityName string `json:"identityName"` - Clusters []IdentityConfigCluster `json:"clusters"` - ClientAssets []map[string]string `json:"clientAssets"` -} - -type IdentityConfigCluster struct { - Name string `json:"name"` - Locality string `json:"locality"` - IngressEndpoint string `json:"ingressEndpoint"` - IngressPort string `json:"ingressPort"` - IngressPortName string `json:"ingressPortName"` - Environment []IdentityConfigEnvironment `json:"environment"` -} - -type IdentityConfigEnvironment struct { - Name string `json:"name"` - Namespace string `json:"namespace"` - ServiceName string `json:"serviceName"` - Type string `json:"type"` - Selectors map[string]string `json:"selectors"` - Ports []coreV1.ServicePort `json:"ports"` - TrafficPolicy TrafficPolicy `json:"trafficPolicy"` -} - -type TrafficPolicy struct { - ClientConnectionConfig v1alpha1.ClientConnectionConfig `json:"clientConnectionConfig"` - GlobalTrafficPolicy v1alpha1.GlobalTrafficPolicy `json:"globalTrafficPolicy"` - OutlierDetection v1alpha1.OutlierDetection `json:"outlierDetection"` -} - // GetIdentityConfigByIdentityName calls the registry API to fetch the IdentityConfig for // the given identityAlias func (c *registryClient) GetIdentityConfigByIdentityName(identityAlias string, ctxLogger *log.Entry) (IdentityConfig, error) { diff --git a/admiral/pkg/registry/registry_test.go b/admiral/pkg/registry/registry_test.go index 0ae0733d..ecc0a99c 100644 --- a/admiral/pkg/registry/registry_test.go +++ b/admiral/pkg/registry/registry_test.go @@ -76,16 +76,15 @@ func TestIdentityConfigGetByIdentityName(t *testing.T) { } for _, c := range testCases { t.Run(c.name, func(t *testing.T) { - identityConfig, err := registryClient.GetIdentityConfigByIdentityName(c.identityAlias, ctxLogger) if err != nil && c.expectedError == nil { t.Errorf("error while getting identityConfig by name with error: %v", err) } else if err != nil && c.expectedError != nil && !errors.As(err, &c.expectedError) { t.Errorf("failed to get correct error: %v, instead got error: %v", c.expectedError, err) } else { - opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.TrafficPolicy{}, networkingV1Alpha3.LoadBalancerSettings{}, networkingV1Alpha3.LocalityLoadBalancerSetting{}, networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{}, duration.Duration{}, networkingV1Alpha3.ConnectionPoolSettings{}, networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{}, networkingV1Alpha3.OutlierDetection{}, wrappers.UInt32Value{}) + opts := getUnexportedProperties() if !cmp.Equal(identityConfig, c.expectedIdentityConfig, opts) { - t.Errorf("mismatch between parsed JSON file and expected identity 
config for alias: %s", c.identityAlias) + t.Errorf("want=%v, got=%v", c.expectedIdentityConfig, identityConfig) t.Errorf(cmp.Diff(identityConfig, c.expectedIdentityConfig, opts)) } } @@ -129,7 +128,7 @@ func TestGetIdentityConfigByClusterName(t *testing.T) { } else if err != nil && c.expectedError != nil && !errors.As(err, &c.expectedError) { t.Errorf("failed to get correct error: %v, instead got error: %v", c.expectedError, err) } else { - opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.TrafficPolicy{}, networkingV1Alpha3.LoadBalancerSettings{}, networkingV1Alpha3.LocalityLoadBalancerSetting{}, networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{}, duration.Duration{}, networkingV1Alpha3.ConnectionPoolSettings{}, networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{}, networkingV1Alpha3.OutlierDetection{}, wrappers.UInt32Value{}) + opts := getUnexportedProperties() if !cmp.Equal(identityConfigs[0], c.expectedIdentityConfig, opts) { t.Errorf("mismatch between parsed JSON file and expected identity config for file: %s", c.clusterName) t.Errorf(cmp.Diff(identityConfigs[0], c.expectedIdentityConfig, opts)) @@ -138,3 +137,17 @@ func TestGetIdentityConfigByClusterName(t *testing.T) { }) } } + +func getUnexportedProperties() cmp.Option { + return cmpopts.IgnoreUnexported( + networkingV1Alpha3.ServicePort{}, + networkingV1Alpha3.TrafficPolicy{}, + networkingV1Alpha3.LoadBalancerSettings{}, + networkingV1Alpha3.LocalityLoadBalancerSetting{}, + networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{}, + duration.Duration{}, + networkingV1Alpha3.ConnectionPoolSettings{}, + networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{}, + networkingV1Alpha3.OutlierDetection{}, + wrappers.UInt32Value{}) +} diff --git a/admiral/pkg/registry/rolloutConfig_parser.go b/admiral/pkg/registry/rolloutConfig_parser.go new file mode 100644 index 00000000..0a177d51 --- /dev/null +++ b/admiral/pkg/registry/rolloutConfig_parser.go @@ -0,0 +1,15 @@ +package registry + +func NewRolloutConfigSyncer(cacheHandler IdentityConfigCache) *rolloutConfigSyncer { + return &rolloutConfigSyncer{ + cacheHandler: cacheHandler, + } +} + +type rolloutConfigSyncer struct { + cacheHandler IdentityConfigCache +} + +func (syncer *rolloutConfigSyncer) Sync(config interface{}) error { + return nil +} diff --git a/admiral/pkg/registry/testdata/record1IdentityConfiguration.json b/admiral/pkg/registry/testdata/record1IdentityConfiguration.json new file mode 100644 index 00000000..e9e047c2 --- /dev/null +++ b/admiral/pkg/registry/testdata/record1IdentityConfiguration.json @@ -0,0 +1,5 @@ +{ + "identityName": "record1", + "clusters": null, + "clientAssets": null +} \ No newline at end of file diff --git a/admiral/pkg/registry/testdata/record2IdentityConfiguration.json b/admiral/pkg/registry/testdata/record2IdentityConfiguration.json new file mode 100644 index 00000000..d5451420 --- /dev/null +++ b/admiral/pkg/registry/testdata/record2IdentityConfiguration.json @@ -0,0 +1,5 @@ +{ + "identityName": "record2", + "clusters": null, + "clientAssets": null +} \ No newline at end of file diff --git a/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json b/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json index 5c710162..1da11848 100644 --- a/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json +++ b/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json @@ -1,29 +1,37 @@ { - "identityName": "Intuit.ctg.taxprep.partnerdatatotax", - "clusters": [ - { + "identityName": "sample", + "clusters": { + 
"cluster1": { "_comment-1": "THIS SECTION CONTAINS CLUSTER LEVEL DETAILS, WHICH ARE THE SAME FOR THE ASSET IN A GIVEN CLUSTER", - "name": "cg-tax-ppd-usw2-k8s", + "name": "cluster1", "locality": "us-west-2", - "ingressEndpoint": "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + "ingressEndpoint": "abc-elb.us-west-2.elb.amazonaws.com.", "ingressPort": "15443", "ingressPortName": "http", "_comment-2": "THIS SECTION CONTAINS ENVIRONMENT LEVEL DETAILS, FOR THE ASSET IN A GIVEN CLUSTER", - "environment": [ - { + "environment": { + "prf": { "name": "prf", - "namespace": "ctg-taxprep-partnerdatatotax-usw2-prf", - "serviceName": "partner-data-to-tax-spk-root-service", + "namespace": "ns-1-usw2-prf", + "serviceName": "app-1-spk-root-service", + "services": { + "app-1-spk-root-service": { + "name": "app-1-spk-root-service", + "weight": -1, + "ports": { + "http": 8090 + } + } + }, "type": "rollout", "selectors": { - "app": "partner-data-to-tax" + "app": "app-1" }, "ports": [ { - "name": "http-service-mesh", - "port": 8090, - "protocol": "TCP", - "targetPort": 8090 + "name": "http", + "number": 80, + "protocol": "http" } ], "trafficPolicy": { @@ -80,20 +88,28 @@ } } }, - { + "e2e": { "name": "e2e", - "namespace": "ctg-taxprep-partnerdatatotax-usw2-e2e", - "serviceName": "partner-data-to-tax-spk-root-service", + "namespace": "ns-1-usw2-e2e", + "serviceName": "app-1-spk-root-service", + "services": { + "app-1-spk-root-service": { + "name": "app-1-spk-root-service", + "weight": -1, + "ports": { + "http": 8090 + } + } + }, "type": "rollout", "selectors": { - "app": "partner-data-to-tax" + "app": "app-1" }, "ports": [ { - "name": "http-service-mesh", - "port": 8090, - "protocol": "TCP", - "targetPort": 8090 + "name": "http", + "number": 80, + "protocol": "http" } ], "trafficPolicy": { @@ -150,20 +166,28 @@ } } }, - { + "qal": { "name": "qal", - "namespace": "ctg-taxprep-partnerdatatotax-usw2-qal", - "serviceName": "partner-data-to-tax-spk-root-service", + "namespace": "ns-1-usw2-qal", + "serviceName": "app-1-spk-root-service", + "services": { + "app-1-spk-root-service": { + "name": "app-1-spk-root-service", + "weight": -1, + "ports": { + "http": 8090 + } + } + }, "type": "rollout", "selectors": { - "app": "partner-data-to-tax" + "app": "app-1" }, "ports": [ { - "name": "http-service-mesh", - "port": 8090, - "protocol": "TCP", - "targetPort": 8090 + "name": "http", + "number": 80, + "protocol": "http" } ], "trafficPolicy": { @@ -220,36 +244,10 @@ } } } - ] - } - ], - "clientAssets": [ - { - "name": "intuit.cto.dev_portal" - }, - { - "name": "intuit.ctg.tto.browserclient" - }, - { - "name": "intuit.ctg.taxprep.partnerdatatotaxtestclient" - }, - { - "name": "intuit.productmarketing.ipu.pmec" - }, - { - "name": "intuit.tax.taxdev.txo" - }, - { - "name": "intuit.CTO.oauth2" - }, - { - "name": "intuit.platform.servicesgateway.servicesgateway" - }, - { - "name": "intuit.ctg.taxprep.partnerdatatotax" - }, - { - "name": "sample" + } } - ] + }, + "clientAssets": { + "sample": "sample" + } } \ No newline at end of file diff --git a/admiral/pkg/registry/testutils.go b/admiral/pkg/registry/testutils.go index b2bbec96..46085bcd 100644 --- a/admiral/pkg/registry/testutils.go +++ b/admiral/pkg/registry/testutils.go @@ -3,19 +3,27 @@ package registry import ( "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model" "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" - coreV1 "k8s.io/api/core/v1" + networking "istio.io/api/networking/v1alpha3" v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" ) -func GetSampleIdentityConfigEnvironment(env string, namespace string) IdentityConfigEnvironment { - identityConfigEnvironment := IdentityConfigEnvironment{ +func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityConfigEnvironment { + identityConfigEnvironment := &IdentityConfigEnvironment{ Name: env, Namespace: namespace, - ServiceName: "partner-data-to-tax-spk-root-service", - Type: "rollout", - Selectors: map[string]string{"app": "partner-data-to-tax"}, - Ports: []coreV1.ServicePort{{Name: "http-service-mesh", Port: int32(8090), Protocol: coreV1.ProtocolTCP, TargetPort: intstr.FromInt(8090)}}, + ServiceName: "app-1-spk-root-service", + Services: map[string]*RegistryServiceConfig{ + "app-1-spk-root-service": &RegistryServiceConfig{ + Name: "app-1-spk-root-service", + Weight: -1, + Ports: map[string]uint32{ + "http": 8090, + }, + }, + }, + Type: "rollout", + Selectors: map[string]string{"app": "app-1"}, + Ports: []*networking.ServicePort{{Name: "http", Number: uint32(80), Protocol: "http"}}, TrafficPolicy: TrafficPolicy{ ClientConnectionConfig: v1alpha1.ClientConnectionConfig{ ObjectMeta: v1.ObjectMeta{ @@ -75,22 +83,29 @@ func GetSampleIdentityConfigEnvironment(env string, namespace string) IdentityCo } func GetSampleIdentityConfig() IdentityConfig { - prfEnv := GetSampleIdentityConfigEnvironment("prf", "ctg-taxprep-partnerdatatotax-usw2-prf") - e2eEnv := GetSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e") - qalEnv := GetSampleIdentityConfigEnvironment("qal", "ctg-taxprep-partnerdatatotax-usw2-qal") - environments := []IdentityConfigEnvironment{prfEnv, e2eEnv, qalEnv} - clientAssets := []map[string]string{{"name": "intuit.cto.dev_portal"}, {"name": "intuit.ctg.tto.browserclient"}, {"name": "intuit.ctg.taxprep.partnerdatatotaxtestclient"}, {"name": "intuit.productmarketing.ipu.pmec"}, {"name": "intuit.tax.taxdev.txo"}, {"name": "intuit.CTO.oauth2"}, {"name": "intuit.platform.servicesgateway.servicesgateway"}, {"name": "intuit.ctg.taxprep.partnerdatatotax"}, {"name": "sample"}} + prfEnv := GetSampleIdentityConfigEnvironment("prf", "ns-1-usw2-prf") + e2eEnv := GetSampleIdentityConfigEnvironment("e2e", "ns-1-usw2-e2e") + qalEnv := GetSampleIdentityConfigEnvironment("qal", "ns-1-usw2-qal") + environments := map[string]*IdentityConfigEnvironment{ + "prf": prfEnv, + "e2e": e2eEnv, + "qal": qalEnv, + } + clientAssets := map[string]string{ + "sample": "sample", + } cluster := IdentityConfigCluster{ - Name: "cg-tax-ppd-usw2-k8s", + Name: "cluster1", Locality: "us-west-2", - IngressEndpoint: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + IngressEndpoint: "abc-elb.us-west-2.elb.amazonaws.com.", IngressPort: "15443", IngressPortName: "http", Environment: environments, } identityConfig := IdentityConfig{ - IdentityName: "Intuit.ctg.taxprep.partnerdatatotax", - Clusters: []IdentityConfigCluster{cluster}, + IdentityName: "sample", + Clusters: map[string]*IdentityConfigCluster{ + "cluster1": &cluster}, ClientAssets: clientAssets, } return identityConfig diff --git a/admiral/pkg/types/types.go b/admiral/pkg/types/types.go new file mode 100644 index 00000000..8acf5bce --- /dev/null +++ b/admiral/pkg/types/types.go @@ -0,0 +1,11 @@ +package types + +import ( + coreV1 "k8s.io/api/core/v1" +) + +// WeightedService utility to store weighted services for argo rollouts +type WeightedService struct { + Weight int32 + Service *coreV1.Service 
+} diff --git a/admiral/pkg/util/constants.go b/admiral/pkg/util/constants.go index 807a6199..0387547c 100644 --- a/admiral/pkg/util/constants.go +++ b/admiral/pkg/util/constants.go @@ -6,4 +6,11 @@ const ( GrpcWeb = "grpc-web" Http2 = "http2" SecretShardKey = "shard" + + Deployment = "deployment" + Rollout = "rollout" + Service = "service" + GlobalTrafficPolicy = "globalTrafficPolicy" + OutlierDetection = "outlierDetection" + ClientConnectionConfig = "clientConnectionConfig" )
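
A minimal usage sketch of the pieces this patch introduces (illustrative only, not part of the patch): it builds the sample `IdentityConfig` from `testutils.go`, constructs the configmap writer in its local-file fallback mode (the secret filter tag is unset, so a nil kubernetes client is acceptable), and syncs one environment of one cluster through the `ConfigSyncer` cache. It assumes a `./testdata` directory exists next to the binary, since that is where the local-file path points.

```go
package main

import (
	"github.com/sirupsen/logrus"

	"github.com/istio-ecosystem/admiral/admiral/pkg/registry"
)

func main() {
	ctxLogger := logrus.WithFields(logrus.Fields{"op": "state-syncer-example"})

	// Sample IdentityConfig with one cluster ("cluster1") and three environments.
	config := registry.GetSampleIdentityConfig()

	// With the secret filter tag unset, Write falls back to a local file
	// under ./testdata, so no kubernetes client is needed here.
	writer := registry.NewConfigMapWriter(nil, ctxLogger)

	// Cache the "qal" environment of "cluster1" and persist it via the writer.
	syncer := registry.NewConfigSync()
	if err := syncer.UpdateEnvironmentConfigByCluster(ctxLogger, "qal", "cluster1", config, writer); err != nil {
		ctxLogger.Errorf("sync failed: %v", err)
	}
}
```

Because the writer is injected rather than constructed inside the syncer, tests can substitute the generated `mocks.MockConfigSyncer` or a stub `ConfigWriter` without touching a cluster.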