Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

detect unused annotation rules #2117

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 39 additions & 20 deletions openshift-hack/e2e/annotate/annotate.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,19 @@ func Run(testMaps map[string][]string, filter func(name string) bool) {
os.Exit(1)
}

unusedPatterns := false
for _, label := range generator.allLabels {
for _, match := range generator.matches[label] {
if !match.matched {
unusedPatterns = true
fmt.Fprintf(os.Stderr, "Unused pattern: %s => %s\n", label, match.pattern)
}
}
}
if unusedPatterns {
os.Exit(1)
}

// All tests must be associated with a sig, either upstream or downstream.
// If you get this error, you should add the [sig-X] tag to your test (if it's
// in origin) or, if it is upstream, add a new rule to rules.go that assigns
Expand Down Expand Up @@ -110,25 +123,34 @@ func init() {
}
}

// matchable is one compiled annotation rule from a label's pattern list.
// A rule matches either as an exact substring (literal) or as a compiled
// regular expression (re); which one is used depends on whether the
// pattern turned out to be a pure literal when compiled. The matched
// flag records whether any test name has ever hit this rule, so that
// rules which never fire can be reported as unused.
type matchable struct {
pattern string // original pattern text as written in rules.go, kept for error reporting
literal string // exact substring to search for, when the pattern is a pure literal
re *regexp.Regexp // compiled expression, when the pattern is not a pure literal
matched bool // set once a test name matches this rule; unmatched rules are flagged as unused
}

func newGenerator(testMaps map[string][]string) *ginkgoTestRenamer {
var allLabels []string
matches := make(map[string]*regexp.Regexp)
stringMatches := make(map[string][]string)
matches := make(map[string][]*matchable)

for label, items := range testMaps {
sort.Strings(items)
allLabels = append(allLabels, label)
var remain []string
for _, item := range items {
if _, exists := matches[item]; exists {
fmt.Fprintf(os.Stderr, "multiple entries for pattern %q\n", item)
os.Exit(1)
}

match := &matchable{pattern: item}
re := regexp.MustCompile(item)
if p, ok := re.LiteralPrefix(); ok {
stringMatches[label] = append(stringMatches[label], p)
match.literal = p
} else {
remain = append(remain, item)
match.re = re
}
}
if len(remain) > 0 {
matches[label] = regexp.MustCompile(strings.Join(remain, `|`))
matches[label] = append(matches[label], match)
}
}
sort.Strings(allLabels)
Expand All @@ -137,7 +159,6 @@ func newGenerator(testMaps map[string][]string) *ginkgoTestRenamer {

return &ginkgoTestRenamer{
allLabels: allLabels,
stringMatches: stringMatches,
matches: matches,
excludedTestsFilter: excludedTestsFilter,
output: make(map[string]string),
Expand All @@ -154,10 +175,8 @@ func newRenamerFromGenerated(names map[string]string) *ginkgoTestRenamer {
type ginkgoTestRenamer struct {
// keys defined in TestMaps in openshift-hack/e2e/annotate/rules.go
allLabels []string
// exact substrings to match to apply a particular label
stringMatches map[string][]string
// regular expressions to match to apply a particular label
matches map[string]*regexp.Regexp
// matches to apply a particular label
matches map[string][]*matchable
// regular expression excluding permanently a set of tests
// see ExcludedTests in openshift-hack/e2e/annotate/rules.go
excludedTestsFilter *regexp.Regexp
Expand Down Expand Up @@ -193,17 +212,17 @@ func (r *ginkgoTestRenamer) generateRename(name string, node types.TestSpec) {
}

var hasLabel bool
for _, segment := range r.stringMatches[label] {
hasLabel = strings.Contains(newName, segment)
for _, match := range r.matches[label] {
if match.re != nil {
hasLabel = match.re.MatchString(newName)
} else {
hasLabel = strings.Contains(newName, match.literal)
}
if hasLabel {
match.matched = true
break
}
}
if !hasLabel {
if re := r.matches[label]; re != nil {
hasLabel = r.matches[label].MatchString(newName)
}
}

if hasLabel {
count++
Expand Down
102 changes: 3 additions & 99 deletions openshift-hack/e2e/annotate/rules.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ var (
`\[Feature:WatchList\]`,
`\[Feature:ServiceCIDRs\]`,
`\[Feature:ClusterTrustBundle\]`,
`\[Feature:SELinuxMount\]`,
`\[FeatureGate:SELinuxMount\]`,
`\[Feature:RelaxedEnvironmentVariableValidation\]`,
`\[Feature:UserNamespacesPodSecurityStandards\]`,
Expand All @@ -25,14 +24,7 @@ var (
},
// tests for features that are not implemented in openshift
"[Disabled:Unimplemented]": {
`Monitoring`, // Not installed, should be
`Cluster level logging`, // Not installed yet
`Kibana`, // Not installed
`Ubernetes`, // Can't set zone labels today
`kube-ui`, // Not installed by default
`Kubernetes Dashboard`, // Not installed by default (also probably slow image pull)
`should proxy to cadvisor`, // we don't expose cAdvisor port directly for security reasons
`\[Feature:BootstrapTokens\]`, // we don't serve cluster-info configmap
`\[Feature:BootstrapTokens\]`, // we don't serve cluster-info configmap
`\[Feature:KubeProxyDaemonSetMigration\]`, // upgrades are run separately
`\[Feature:BoundServiceAccountTokenVolume\]`, // upgrades are run separately
`\[Feature:StatefulUpgrade\]`, // upgrades are run separately
Expand All @@ -41,19 +33,14 @@ var (
"[Disabled:SpecialConfig]": {
// GPU node needs to be available
`\[Feature:GPUDevicePlugin\]`,
`\[sig-scheduling\] GPUDevicePluginAcrossRecreate \[Feature:Recreate\]`,

`\[Feature:LocalStorageCapacityIsolation\]`, // relies on a separate daemonset?
`\[sig-cloud-provider-gcp\]`, // these test require a different configuration - note that GCE tests from the sig-cluster-lifecycle were moved to the sig-cloud-provider-gcpcluster lifecycle see https://github.com/kubernetes/kubernetes/commit/0b3d50b6dccdc4bbd0b3e411c648b092477d79ac#diff-3b1910d08fb8fd8b32956b5e264f87cb

`kube-dns-autoscaler`, // Don't run kube-dns
`should check if Kubernetes master services is included in cluster-info`, // Don't run kube-dns
`DNS configMap`, // this tests dns federation configuration via configmap, which we don't support yet
`DNS configMap`, // this tests dns federation configuration via configmap, which we don't support yet

`NodeProblemDetector`, // requires a non-master node to run on
`Advanced Audit should audit API calls`, // expects to be able to call /logs

`Firewall rule should have correct firewall rules for e2e cluster`, // Upstream-install specific
`NodeProblemDetector`, // requires a non-master node to run on

// https://bugzilla.redhat.com/show_bug.cgi?id=2079958
`\[sig-network\] \[Feature:Topology Hints\] should distribute endpoints evenly`,
Expand All @@ -67,14 +54,12 @@ var (
// always add an issue here
"[Disabled:Broken]": {
`mount an API token into pods`, // We add 6 secrets, not 1
`ServiceAccounts should ensure a single API token exists`, // We create lots of secrets
`unchanging, static URL paths for kubernetes api services`, // the test needs to exclude URLs that are not part of conformance (/logs)
`Services should be able to up and down services`, // we don't have wget installed on nodes
`KubeProxy should set TCP CLOSE_WAIT timeout`, // the test require communication to port 11302 in the cluster nodes
`should check kube-proxy urls`, // previously this test was skipped b/c we reported -1 as the number of nodes, now we report proper number and test fails
`SSH`, // TRIAGE
`should implement service.kubernetes.io/service-proxy-name`, // this is an optional test that requires SSH. sig-network
`recreate nodes and ensure they function upon restart`, // https://bugzilla.redhat.com/show_bug.cgi?id=1756428
`\[Driver: iscsi\]`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711627

"RuntimeClass should reject",
Expand All @@ -85,7 +70,6 @@ var (

// TODO(node): configure the cri handler for the runtime class to make this work
"should run a Pod requesting a RuntimeClass with a configured handler",
"should reject a Pod requesting a RuntimeClass with conflicting node selector",
"should run a Pod requesting a RuntimeClass with scheduling",

// A fix is in progress: https://github.com/openshift/origin/pull/24709
Expand All @@ -98,9 +82,6 @@ var (
"MetricsGrabber should grab all metrics from a ControllerManager",
"MetricsGrabber should grab all metrics from a Scheduler",

// https://bugzilla.redhat.com/show_bug.cgi?id=1906808
`ServiceAccounts should support OIDC discovery of service account issuer`,

// NFS umount is broken in kernels 5.7+
// https://bugzilla.redhat.com/show_bug.cgi?id=1854379
`\[sig-storage\].*\[Driver: nfs\] \[Testpattern: Dynamic PV \(default fs\)\].*subPath should be able to unmount after the subpath directory is deleted`,
Expand All @@ -123,14 +104,8 @@ var (
`Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should enforce policy based on Ports`,
`Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector`,

`Topology Hints should distribute endpoints evenly`,

// https://bugzilla.redhat.com/show_bug.cgi?id=1908645
`\[sig-network\] Networking Granular Checks: Services should function for service endpoints using hostNetwork`,
`\[sig-network\] Networking Granular Checks: Services should function for pod-Service\(hostNetwork\)`,

// https://bugzilla.redhat.com/show_bug.cgi?id=1952460
`\[sig-network\] Firewall rule control plane should not expose well-known ports`,

// https://bugzilla.redhat.com/show_bug.cgi?id=1988272
`\[sig-network\] Networking should provide Internet connection for containers \[Feature:Networking-IPv6\]`,
Expand All @@ -145,9 +120,6 @@ var (
// https://bugzilla.redhat.com/show_bug.cgi?id=1953478
`\[sig-storage\] Dynamic Provisioning Invalid AWS KMS key should report an error and create no PV`,

// https://issues.redhat.com/browse/OCPBUGS-34577
`\[sig-storage\] Multi-AZ Cluster Volumes should schedule pods in the same zones as statically provisioned PVs`,

// https://issues.redhat.com/browse/OCPBUGS-34594
`\[sig-node\] \[Feature:PodLifecycleSleepAction\] when create a pod with lifecycle hook using sleep action valid prestop hook using sleep action`,

Expand All @@ -166,16 +138,6 @@ var (
},
// tests that may work, but we don't support them
"[Disabled:Unsupported]": {
`\[Driver: rbd\]`, // OpenShift 4.x does not support Ceph RBD (use CSI instead)
`\[Driver: ceph\]`, // OpenShift 4.x does not support CephFS (use CSI instead)
`\[Driver: gluster\]`, // OpenShift 4.x does not support Gluster
`Volumes GlusterFS`, // OpenShift 4.x does not support Gluster
`GlusterDynamicProvisioner`, // OpenShift 4.x does not support Gluster

// Skip vSphere-specific storage tests. The standard in-tree storage tests for vSphere
// (prefixed with `In-tree Volumes [Driver: vsphere]`) are enough for testing this plugin.
// https://bugzilla.redhat.com/show_bug.cgi?id=2019115
`\[sig-storage\].*\[Feature:vsphere\]`,
// Also, our CI doesn't support topology, so disable those tests
`\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies`,
`\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`,
Expand All @@ -184,7 +146,6 @@ var (
},
// tests too slow to be part of conformance
"[Slow]": {
`\[sig-scalability\]`, // disable from the default set for now
`should create and stop a working application`, // Inordinately slow tests

`\[Feature:PerformanceDNS\]`, // very slow
Expand All @@ -194,25 +155,13 @@ var (
// tests that are known flaky
"[Flaky]": {
`Job should run a job to completion when tasks sometimes fail and are not locally restarted`, // seems flaky, also may require too many resources
// TODO(node): test works when run alone, but not in the suite in CI
`\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`,
},
// tests that must be run without competition
"[Serial]": {
`\[Disruptive\]`,
`\[Feature:Performance\]`, // requires isolation

`Service endpoints latency`, // requires low latency
`Clean up pods on node`, // schedules up to max pods per node
`DynamicProvisioner should test that deleting a claim before the volume is provisioned deletes the volume`, // test is very disruptive to other tests

`Should be able to support the 1\.7 Sample API Server using the current Aggregator`, // down apiservices break other clients today https://bugzilla.redhat.com/show_bug.cgi?id=1623195

`\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`,

`should prevent Ingress creation if more than 1 IngressClass marked as default`, // https://bugzilla.redhat.com/show_bug.cgi?id=1822286

`\[sig-network\] IngressClass \[Feature:Ingress\] should set default value on new IngressClass`, //https://bugzilla.redhat.com/show_bug.cgi?id=1833583
},
// Tests that don't pass on disconnected, either due to requiring
// internet access for GitHub (e.g. many of the s2i builds), or
Expand Down Expand Up @@ -245,33 +194,14 @@ var (
`\[Feature:LoadBalancer\]`,
},
"[Skipped:gce]": {
// Requires creation of a different compute instance in a different zone and is not compatible with volumeBindingMode of WaitForFirstConsumer which we use in 4.x
`\[sig-storage\] Multi-AZ Cluster Volumes should only be allowed to provision PDs in zones where nodes exist`,

// The following tests try to ssh directly to a node. None of our nodes have external IPs
`\[k8s.io\] \[sig-node\] crictl should be able to run crictl on the node`,
`\[sig-storage\] Flexvolumes should be mountable`,
`\[sig-storage\] Detaching volumes should not work when mount is in progress`,

// We are using openshift-sdn to conceal metadata
`\[sig-auth\] Metadata Concealment should run a check-metadata-concealment job to completion`,

// https://bugzilla.redhat.com/show_bug.cgi?id=1740959
`\[sig-api-machinery\] AdmissionWebhook should be able to deny pod and configmap creation`,

// https://bugzilla.redhat.com/show_bug.cgi?id=1745720
`\[sig-storage\] CSI Volumes \[Driver: pd.csi.storage.gke.io\]`,

// https://bugzilla.redhat.com/show_bug.cgi?id=1749882
`\[sig-storage\] CSI Volumes CSI Topology test using GCE PD driver \[Serial\]`,

// https://bugzilla.redhat.com/show_bug.cgi?id=1751367
`gce-localssd-scsi-fs`,

// https://bugzilla.redhat.com/show_bug.cgi?id=1750851
// should be serial if/when it's re-enabled
`\[HPA\] Horizontal pod autoscaling \(scale resource: Custom Metrics from Stackdriver\)`,
`\[Feature:CustomMetricsAutoscaling\]`,
},
"[Skipped:ibmcloud]": {
// LoadBalancer tests in 1.31 require explicit platform-specific skips
Expand Down Expand Up @@ -304,29 +234,6 @@ var (
`\[Feature:LoadBalancer\]`,
},

"[sig-node]": {
`\[NodeConformance\]`,
`NodeLease`,
`lease API`,
`\[NodeFeature`,
`\[NodeAlphaFeature`,
`Probing container`,
`Security Context When creating a`,
`Downward API should create a pod that prints his name and namespace`,
`Liveness liveness pods should be automatically restarted`,
`Secret should create a pod that reads a secret`,
`Pods should delete a collection of pods`,
`Pods should run through the lifecycle of Pods and PodStatus`,
},
"[sig-cluster-lifecycle]": {
`Feature:ClusterAutoscalerScalability`,
`recreate nodes and ensure they function`,
},
"[sig-arch]": {
// not run, assigned to arch as catch-all
`\[Feature:GKELocalSSD\]`,
`\[Feature:GKENodePool\]`,
},
// Tests that don't pass under openshift-sdn.
// These are skipped explicitly by openshift-hack/test-kubernetes-e2e.sh,
// but will also be skipped by openshift-tests in jobs that use openshift-sdn.
Expand Down Expand Up @@ -373,8 +280,6 @@ var (
`\[sig-node\] NoExecuteTaintManager Multiple Pods \[Serial\] evicts pods with minTolerationSeconds \[Disruptive\] \[Conformance\]`,
`\[sig-node\] NoExecuteTaintManager Multiple Pods \[Serial\] only evicts pods without tolerations from tainted nodes`,
`\[sig-cli\] Kubectl client Kubectl taint \[Serial\] should remove all the taints with the same key off a node`,
`\[sig-network\] LoadBalancers should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes`,
`\[sig-network\] LoadBalancers should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes`,
`\[sig-architecture\] Conformance Tests should have at least two untainted nodes`,
},

Expand All @@ -383,7 +288,6 @@ var (
// Requires CSISnapshot capability
`\[Feature:VolumeSnapshotDataSource\]`,
// Requires Storage capability
`\[Driver: aws\]`,
`\[Feature:StorageProvider\]`,
},

Expand Down