diff --git a/Dockerfile b/Dockerfile
index fe485e3747..fb06487950 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -108,7 +108,7 @@ CMD ["sh", "-c", "/bin/barbican-kms-plugin --socketpath ${socketpath} --cloud-co
 ##
 # step 1: copy all necessary files from Debian distro to /dest folder
-# all magic heppens in tools/csi-deps.sh
+# all magic happens in tools/csi-deps.sh
 FROM --platform=${TARGETPLATFORM} ${DEBIAN_IMAGE} as cinder-csi-plugin-utils
 
 RUN clean-install bash rsync mount udev btrfs-progs e2fsprogs xfsprogs util-linux
diff --git a/cmd/client-keystone-auth/main.go b/cmd/client-keystone-auth/main.go
index 33193c4be3..0612507bf8 100644
--- a/cmd/client-keystone-auth/main.go
+++ b/cmd/client-keystone-auth/main.go
@@ -185,7 +185,7 @@ func main() {
 	// Generate Gophercloud Auth Options based on input data from stdin
 	// if IsTerminal returns "true", or from env variables otherwise.
 	if !term.IsTerminal(int(os.Stdin.Fd())) {
-		// If all requiered arguments are set use them
+		// If all required arguments are set, use them
 		if argumentsAreSet(url, user, project, password, domain, applicationCredentialID, applicationCredentialName, applicationCredentialSecret) {
 			options.AuthOptions = gophercloud.AuthOptions{
 				IdentityEndpoint: url,
diff --git a/docs/cinder-csi-plugin/development.md b/docs/cinder-csi-plugin/development.md
index bf90ba89cf..3b796b744a 100644
--- a/docs/cinder-csi-plugin/development.md
+++ b/docs/cinder-csi-plugin/development.md
@@ -14,4 +14,4 @@
 
 There are two versions (`specVersion` and `Version`) defined at [driver.go](../../pkg/csi/cinder/driver.go) and both of them are in `x.y.z` format. `specVersion` indicates the version of [CSI spec](https://github.com/container-storage-interface/spec) that Cinder CSI supports whereas `Version` is the version of Cinder CSI driver itself. For new each release or major functionalities update such as options/params updated, you need increase `.z` version. If the CSI spec version is upgraded, the Cinder CSI version need bump as well.
 
-For example, `specVersion` is `1.2.0` and `Version` is `1.2.1` then there's a new feature or option added but CSI spec remains same, the `specVersion` need to be kept as `1.2.0` and `Version` need to be bumped to `1.2.2`. If the CSI spec is bumpped to `1.3.0`, the `specVersion` and `Version` need to be bumped to `1.3.0` accordingly.
+For example, if `specVersion` is `1.2.0` and `Version` is `1.2.1`, and a new feature or option is added while the CSI spec remains the same, `specVersion` needs to stay `1.2.0` and `Version` needs to be bumped to `1.2.2`. If the CSI spec is bumped to `1.3.0`, both `specVersion` and `Version` need to be bumped to `1.3.0` accordingly.
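The development.md hunk above spells out how the two version constants interact; purely for orientation, a hypothetical sketch of the declarations in `pkg/csi/cinder/driver.go` using the doc's own example values (not the file's verbatim contents):

```go
// Hypothetical sketch of the two version constants the doc describes,
// in the doc's example state: a new driver feature against an unchanged CSI spec.
const (
	specVersion = "1.2.0" // version of the CSI spec that Cinder CSI supports
	Version     = "1.2.2" // version of the Cinder CSI driver itself, .z bumped
)
```
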
diff --git a/docs/cinder-csi-plugin/examples.md b/docs/cinder-csi-plugin/examples.md
index fa34da0f32..6a7e84d5b1 100644
--- a/docs/cinder-csi-plugin/examples.md
+++ b/docs/cinder-csi-plugin/examples.md
@@ -15,11 +15,11 @@
 
 # Cinder CSI Driver Usage Examples
 
-All following examples need to be used inside instance(s) provisoned by openstack, otherwise the attach action will fail due to fail to find instance ID from given openstack cloud.
+All following examples need to be used inside instance(s) provisioned by openstack, otherwise the attach action will fail due to failure to find the instance ID in the given openstack cloud.
 
 ## Dynamic Volume Provisioning
 
-For dynamic provisoning , create StorageClass, PersistentVolumeClaim and pod to consume it.
+For dynamic provisioning, create a StorageClass, a PersistentVolumeClaim and a pod to consume it.
 Checkout [sample app](../../examples/cinder-csi-plugin/nginx.yaml) definition fore reference.
 
 ```kubectl -f examples/cinder-csi-plugin/nginx.yaml create```
@@ -349,7 +349,7 @@
 NAME   READY   STATUS    RESTARTS   AGE
 app    1/1     Running   0          5m11s
 ```
 
-Of course, A new availability zone `nova1` can be created in openstack side to satisify the requirement as well.
+Of course, a new availability zone `nova1` can be created on the openstack side to satisfy the requirement as well.
 
 ## Disaster recovery of PV and PVC
diff --git a/docs/cinder-csi-plugin/features.md b/docs/cinder-csi-plugin/features.md
index 266057c78a..b033ff791f 100644
--- a/docs/cinder-csi-plugin/features.md
+++ b/docs/cinder-csi-plugin/features.md
@@ -22,7 +22,7 @@
 
 ## Dynamic Provisioning
 
-Dynamic Provisoning uses persistence volume claim (PVC) to request the Kuberenetes to create the Cinder volume on behalf of user and consumes the volume from inside container.
+Dynamic Provisioning uses a persistent volume claim (PVC) to request that Kubernetes create the Cinder volume on behalf of the user, and consumes the volume from inside the container.
 
 For usage, refer [sample app](./examples.md#dynamic-volume-provisioning)
 
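Both docs hunks above center on dynamic provisioning: a PersistentVolumeClaim referencing a CSI StorageClass is what triggers Cinder volume creation. The sample app does this with YAML; purely as an illustration, a hedged client-go sketch of the same request (the StorageClass name `csi-sc-cinderplugin` and the `default` namespace are assumptions, not values mandated by the docs):

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load ~/.kube/config; inside a pod, rest.InClusterConfig() would be used instead.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	sc := "csi-sc-cinderplugin" // hypothetical StorageClass backed by cinder.csi.openstack.org
	pvc := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-pvc-cinderplugin"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			StorageClassName: &sc,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("1Gi")},
			},
		},
	}
	// Creating the claim is the "request to Kubernetes"; the external-provisioner
	// sidecar then asks the Cinder CSI driver to create the backing volume.
	if _, err := client.CoreV1().PersistentVolumeClaims("default").Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```
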
diff --git a/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md b/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md
index 03a60464b6..39dc611bcf 100644
--- a/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md
+++ b/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md
@@ -679,7 +679,7 @@ Next you have several ways to specify additional auth parameters:
 2. Specify auth parameters in the `~/.kube/config` file. For more
    information read
-   [client keystone auth configuaration doc](./using-client-keystone-auth.md)
+   [client keystone auth configuration doc](./using-client-keystone-auth.md)
    and
    [credential plugins documentation](https://kubernetes.io/docs/admin/authentication/#client-go-credential-plugins)
 
 3. Use the interactive mode. If auth parameters are not specified initially,
diff --git a/docs/openstack-cloud-controller-manager/migrate-to-ccm-with-csimigration.md b/docs/openstack-cloud-controller-manager/migrate-to-ccm-with-csimigration.md
index 98737fd7ce..7eaaa45717 100644
--- a/docs/openstack-cloud-controller-manager/migrate-to-ccm-with-csimigration.md
+++ b/docs/openstack-cloud-controller-manager/migrate-to-ccm-with-csimigration.md
@@ -156,7 +156,7 @@ systemctl restart kubelet
 Verify the CSI settings for that particular node:
 
 ```
-root@small-k8s-1:~# kubectl get csinode small-k8s-2 -oyaml
+root@small-k8s-1:~# kubectl get csinode small-k8s-2 -o yaml
 apiVersion: storage.k8s.io/v1
 kind: CSINode
 metadata:
diff --git a/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md b/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md
index ca8350dc85..823aaa44b8 100644
--- a/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md
+++ b/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md
@@ -59,7 +59,7 @@ The following guide has been tested to install Kubernetes v1.17 on Ubuntu 18.04.
   kubectl create secret -n kube-system generic cloud-config --from-file=cloud.conf
   ```
 
-- Create RBAC resources and openstack-cloud-controller-manager deamonset.
+- Create RBAC resources and the openstack-cloud-controller-manager daemonset.
 
   ```shell
   kubectl apply -f https://raw.githubusercontent.com/kubernetes/cloud-provider-openstack/master/manifests/controller-manager/cloud-controller-manager-roles.yaml
diff --git a/pkg/autohealing/cloudprovider/openstack/provider.go b/pkg/autohealing/cloudprovider/openstack/provider.go
index 68e508e6c3..70a27c9d0e 100644
--- a/pkg/autohealing/cloudprovider/openstack/provider.go
+++ b/pkg/autohealing/cloudprovider/openstack/provider.go
@@ -370,7 +370,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error {
 
 	err := provider.UpdateHealthStatus(masters, workers)
 	if err != nil {
-		return fmt.Errorf("failed to update the helath status of cluster %s, error: %v", clusterName, err)
+		return fmt.Errorf("failed to update the health status of cluster %s, error: %v", clusterName, err)
 	}
 
 	cluster, err := clusters.Get(provider.Magnum, clusterName).Extract()
@@ -627,7 +627,7 @@ func (provider CloudProvider) Enabled() bool {
 	}
 
 	if _, isPresent := cluster.Labels[ClusterAutoHealingLabel]; !isPresent {
-		log.Infof("Autohealing is disalbed for cluster %s", clusterName)
+		log.Infof("Autohealing is disabled for cluster %s", clusterName)
 		return false
 	}
 	autoHealingEnabled, err := strconv.ParseBool(cluster.Labels[ClusterAutoHealingLabel])
@@ -636,7 +636,7 @@ func (provider CloudProvider) Enabled() bool {
 		return false
 	}
 	if !autoHealingEnabled {
-		log.Infof("Autohealing is disalbed for cluster %s", clusterName)
+		log.Infof("Autohealing is disabled for cluster %s", clusterName)
 		return false
 	}
 
@@ -659,7 +659,7 @@ func (provider CloudProvider) Enabled() bool {
 	return true
 }
 
-// CheckNodeCondition check if a node's conditon list contains the given condition type and status
+// CheckNodeCondition checks if a node's condition list contains the given condition type and status
 func CheckNodeCondition(node *apiv1.Node, conditionType apiv1.NodeConditionType, conditionStatus apiv1.ConditionStatus) bool {
 	if len(node.Status.Conditions) == 0 {
 		return false
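The last hunk above cuts off inside `CheckNodeCondition`; for readers following the logic, a plausible completion under the signature shown (the loop past the hunk boundary is an assumption, not the verbatim upstream body):

```go
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
)

// Sketch: how the function plausibly continues beyond the hunk.
func CheckNodeCondition(node *apiv1.Node, conditionType apiv1.NodeConditionType, conditionStatus apiv1.ConditionStatus) bool {
	if len(node.Status.Conditions) == 0 {
		return false
	}
	for _, cond := range node.Status.Conditions {
		// Match only when both the condition type and its status agree.
		if cond.Type == conditionType && cond.Status == conditionStatus {
			return true
		}
	}
	return false
}

func main() {
	node := &apiv1.Node{}
	node.Status.Conditions = []apiv1.NodeCondition{
		{Type: apiv1.NodeReady, Status: apiv1.ConditionFalse},
	}
	// The autohealer uses checks like this to decide whether a node needs repair.
	fmt.Println(CheckNodeCondition(node, apiv1.NodeReady, apiv1.ConditionFalse)) // true
}
```
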
diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go
index 036539120b..110d3e3ec8 100644
--- a/pkg/csi/cinder/controllerserver.go
+++ b/pkg/csi/cinder/controllerserver.go
@@ -530,14 +530,14 @@ func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req
 	_, err := cs.Cloud.GetVolume(volumeID)
 	if err != nil {
 		if cpoerrors.IsNotFound(err) {
-			return nil, status.Errorf(codes.NotFound, "ValidateVolumeCapabiltites Volume %s not found", volumeID)
+			return nil, status.Errorf(codes.NotFound, "ValidateVolumeCapabilities Volume %s not found", volumeID)
 		}
-		return nil, status.Errorf(codes.Internal, "ValidateVolumeCapabiltites %v", err)
+		return nil, status.Errorf(codes.Internal, "ValidateVolumeCapabilities %v", err)
 	}
 
 	for _, cap := range reqVolCap {
 		if cap.GetAccessMode().GetMode() != cs.Driver.vcap[0].Mode {
-			return &csi.ValidateVolumeCapabilitiesResponse{Message: "Requested Volume Capabilty not supported"}, nil
+			return &csi.ValidateVolumeCapabilitiesResponse{Message: "Requested Volume Capability not supported"}, nil
 		}
 	}
 
diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go
index eb32140a4c..afa0b02c7b 100644
--- a/pkg/csi/cinder/controllerserver_test.go
+++ b/pkg/csi/cinder/controllerserver_test.go
@@ -655,18 +655,18 @@ func TestValidateVolumeCapabilities(t *testing.T) {
 		},
 	}
 
-	expectedRes2 := &csi.ValidateVolumeCapabilitiesResponse{Message: "Requested Volume Capabilty not supported"}
+	expectedRes2 := &csi.ValidateVolumeCapabilitiesResponse{Message: "Requested Volume Capability not supported"}
 
-	// Invoke ValidateVolumeCapabilties
+	// Invoke ValidateVolumeCapabilities
 	actualRes, err := fakeCs.ValidateVolumeCapabilities(FakeCtx, fakereq)
 	if err != nil {
-		t.Errorf("failed to ValidateVolumeCapabilties: %v", err)
+		t.Errorf("failed to ValidateVolumeCapabilities: %v", err)
 	}
 
 	actualRes2, err := fakeCs.ValidateVolumeCapabilities(FakeCtx, fakereq2)
 	if err != nil {
-		t.Errorf("failed to ValidateVolumeCapabilties: %v", err)
+		t.Errorf("failed to ValidateVolumeCapabilities: %v", err)
 	}
 
 	// assert
diff --git a/pkg/csi/cinder/nodeserver.go b/pkg/csi/cinder/nodeserver.go
index 71cd39ac38..23058fcef9 100644
--- a/pkg/csi/cinder/nodeserver.go
+++ b/pkg/csi/cinder/nodeserver.go
@@ -317,7 +317,7 @@ func nodeUnpublishEphemeral(req *csi.NodeUnpublishVolumeRequest, ns *nodeServer,
 	if len(vol.Attachments) > 0 {
 		instanceID = vol.Attachments[0].ServerID
 	} else {
-		return nil, status.Error(codes.FailedPrecondition, "Volume attachement not found in request")
+		return nil, status.Error(codes.FailedPrecondition, "Volume attachment not found in request")
 	}
 
 	err := ns.Cloud.DetachVolume(instanceID, volumeID)
diff --git a/pkg/csi/cinder/openstack/openstack_snapshots.go b/pkg/csi/cinder/openstack/openstack_snapshots.go
index 00dc9b5c65..6337e5313c 100644
--- a/pkg/csi/cinder/openstack/openstack_snapshots.go
+++ b/pkg/csi/cinder/openstack/openstack_snapshots.go
@@ -173,7 +173,7 @@ func (os *OpenStack) WaitSnapshotReady(snapshotID string) error {
 	})
 
 	if wait.Interrupted(err) {
-		err = fmt.Errorf("Timeout, Snapshot %s is still not Ready %v", snapshotID, err)
+		err = fmt.Errorf("timeout, Snapshot %s is still not Ready %v", snapshotID, err)
 	}
 
 	return err
diff --git a/pkg/csi/cinder/openstack/openstack_volumes.go b/pkg/csi/cinder/openstack/openstack_volumes.go
index 083908e6b4..9ef45b455a 100644
--- a/pkg/csi/cinder/openstack/openstack_volumes.go
+++ b/pkg/csi/cinder/openstack/openstack_volumes.go
@@ -206,7 +206,7 @@ func (os *OpenStack) AttachVolume(instanceID, volumeID string) (string, error) {
 	return volume.ID, nil
 }
 
-// WaitDiskAttached waits for attched
+// WaitDiskAttached waits for the disk to be attached
 func (os *OpenStack) WaitDiskAttached(instanceID string, volumeID string) error {
 	backoff := wait.Backoff{
 		Duration: diskAttachInitDelay,
@@ -348,7 +348,7 @@ func (os *OpenStack) ExpandVolume(volumeID string, status string, newSize int) e
 
 	switch status {
 	case VolumeInUseStatus:
-		// If the user has disabled the use of microversion to be compatibale with
+		// If the user has disabled the use of microversion to be compatible with
 		// older clouds, we should fail early
 		if os.bsOpts.IgnoreVolumeMicroversion {
 			return fmt.Errorf("volume online resize is not available with ignore-volume-microversion, requires microversion 3.42 or newer")
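`WaitSnapshotReady` and `WaitDiskAttached` in the hunks above share one pattern: poll under an apimachinery `wait.Backoff` and translate a timeout via `wait.Interrupted`. A minimal self-contained sketch of that pattern, with placeholder durations rather than the driver's real `diskAttachInitDelay`/factor/steps values:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: time.Second, // placeholder initial delay
		Factor:   1.2,         // each retry waits 1.2x longer
		Steps:    10,          // give up after 10 attempts
	}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attached := checkAttachment() // stand-in for a real Cinder status lookup
		return attached, nil          // false means "not yet, retry"
	})
	if wait.Interrupted(err) {
		// Mirrors the driver's "timeout, ... still not Ready" handling.
		err = fmt.Errorf("timeout waiting for attachment: %v", err)
	}
	fmt.Println(err)
}

func checkAttachment() bool { return false } // hypothetical; always times out here
```
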
diff --git a/pkg/identity/keystone/authorizer.go b/pkg/identity/keystone/authorizer.go
index 08ff1f79ec..147f50587c 100644
--- a/pkg/identity/keystone/authorizer.go
+++ b/pkg/identity/keystone/authorizer.go
@@ -324,7 +324,7 @@ func (a *Authorizer) Authorize(attributes authorizer.Attributes) (authorized aut
 		}
 	}
 
-	// When the user.Extra does not exist, it means that the keytone user authentication has failed, and the authorization verification should not pass.
+	// When the user.Extra does not exist, it means that the keystone user authentication has failed, and the authorization verification should not pass.
 	if user.GetExtra() == nil {
 		return authorizer.DecisionDeny, "No auth info found.", nil
 	}
diff --git a/pkg/identity/keystone/config.go b/pkg/identity/keystone/config.go
index 2d4a87c162..78814d03e0 100644
--- a/pkg/identity/keystone/config.go
+++ b/pkg/identity/keystone/config.go
@@ -88,7 +88,7 @@ func (c *Config) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&c.KeystoneCA, "keystone-ca-file", c.KeystoneCA, "File containing the certificate authority for Keystone Service.")
 	fs.StringVar(&c.PolicyFile, "keystone-policy-file", c.PolicyFile, "File containing the policy, if provided, it takes precedence over the policy configmap.")
 	fs.StringVar(&c.PolicyConfigMapName, "policy-configmap-name", c.PolicyConfigMapName, "ConfigMap in kube-system namespace containing the policy configuration, the ConfigMap data must contain the key 'policies'")
-	fs.StringVar(&c.SyncConfigFile, "sync-config-file", c.SyncConfigFile, "File containing config values for data synchronization beetween Keystone and Kubernetes.")
-	fs.StringVar(&c.SyncConfigMapName, "sync-configmap-name", "", "ConfigMap in kube-system namespace containing config values for data synchronization beetween Keystone and Kubernetes.")
+	fs.StringVar(&c.SyncConfigFile, "sync-config-file", c.SyncConfigFile, "File containing config values for data synchronization between Keystone and Kubernetes.")
+	fs.StringVar(&c.SyncConfigMapName, "sync-configmap-name", "", "ConfigMap in kube-system namespace containing config values for data synchronization between Keystone and Kubernetes.")
 	fs.StringVar(&c.Kubeconfig, "kubeconfig", c.Kubeconfig, "Kubeconfig file used to connect to Kubernetes API to get policy configmap. If the service is running inside the pod, this option is not necessary, will use in-cluster config instead.")
 }
diff --git a/pkg/identity/keystone/keystone.go b/pkg/identity/keystone/keystone.go
index e37c7ce57f..2a8e7933d5 100644
--- a/pkg/identity/keystone/keystone.go
+++ b/pkg/identity/keystone/keystone.go
@@ -312,7 +312,7 @@ func (k *Auth) authenticateToken(w http.ResponseWriter, r *http.Request, token s
 	var response status
 	response.Authenticated = true
 
-	// Modify user info accoding to the sync configuration.
+	// Modify user info according to the sync configuration.
 	response.User = *k.syncer.syncRoles(&info)
 
 	data["status"] = response
@@ -421,7 +421,7 @@ func NewKeystoneAuth(c *Config) (*Auth, error) {
 
 	// Get policy definition either from a policy file or the policy configmap. Policy file takes precedence
 	// over the configmap, but the policy definition will be refreshed based on the configmap change on-the-fly. It
-	// is possible that both are not provided, in this case, the keytone webhook authorization will always return deny.
+	// is possible that both are not provided, in this case, the keystone webhook authorization will always return deny.
 	var policy policyList
 	if c.PolicyConfigMapName != "" {
 		cm, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(context.TODO(), c.PolicyConfigMapName, metav1.GetOptions{})
@@ -451,7 +451,7 @@ func NewKeystoneAuth(c *Config) (*Auth, error) {
 
 	// Get sync config either from a sync config file or the sync configmap. Sync config file takes precedence
 	// over the configmap, but the sync config definition will be refreshed based on the configmap change on-the-fly. It
-	// is possible that both are not provided, in this case, the keytone webhook authenticator will not synchronize data.
+	// is possible that both are not provided, in this case, the keystone webhook authenticator will not synchronize data.
 	var sc *syncConfig
 	if c.SyncConfigMapName != "" {
 		cm, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(context.TODO(), c.SyncConfigMapName, metav1.GetOptions{})
@@ -544,7 +544,7 @@ func createIdentityV3Provider(options gophercloud.AuthOptions, transport http.Ro
 	}
 	chosen, _, err := utils.ChooseVersion(client, versions)
 	if err != nil {
-		return nil, fmt.Errorf("Unable to find identity API v3 version : %v", err)
+		return nil, fmt.Errorf("unable to find identity API v3 version: %v", err)
 	}
 
 	switch chosen.ID {
@@ -552,7 +552,7 @@ func createIdentityV3Provider(options gophercloud.AuthOptions, transport http.Ro
 		return client, nil
 	default:
 		// The switch statement must be out of date from the versions list.
-		return nil, fmt.Errorf("Unsupported identity API version: %s", chosen.ID)
+		return nil, fmt.Errorf("unsupported identity API version: %s", chosen.ID)
 	}
 }
diff --git a/pkg/ingress/controller/controller.go b/pkg/ingress/controller/controller.go
index b591258000..c81cf28ca5 100644
--- a/pkg/ingress/controller/controller.go
+++ b/pkg/ingress/controller/controller.go
@@ -90,7 +90,7 @@
 	// https://github.com/kubernetes/cloud-provider/blob/25867882d509131a6fdeaf812ceebfd0f19015dd/controllers/service/controller.go#L673
 	LabelNodeExcludeLB = "node.kubernetes.io/exclude-from-external-load-balancers"
 
-	// DepcreatedLabelNodeRoleMaster specifies that a node is a master
+	// DeprecatedLabelNodeRoleMaster specifies that a node is a master
 	// It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112
 	// Deprecated in favor of LabelNodeExcludeLB
 	DeprecatedLabelNodeRoleMaster = "node-role.kubernetes.io/master"
@@ -902,7 +902,7 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error {
 		}
 	}
 
-	// Reconsile octavia resources.
+	// Reconcile octavia resources.
 	rt := openstack.NewResourceTracker(ingfullName, c.osClient.Octavia, lb.ID, listener.ID, newPools, newPolicies, existingPools, oldPolicies)
 	if err := rt.CreateResources(); err != nil {
 		return err
diff --git a/pkg/ingress/controller/openstack/client.go b/pkg/ingress/controller/openstack/client.go
index 24a3100aaa..518e322607 100644
--- a/pkg/ingress/controller/openstack/client.go
+++ b/pkg/ingress/controller/openstack/client.go
@@ -73,7 +73,7 @@ func NewOpenStack(cfg config.Config) (*OpenStack, error) {
 	var barbican *gophercloud.ServiceClient
 	barbican, err = openstack.NewKeyManagerV1(provider, epOpts)
 	if err != nil {
-		log.Warn("Barbican not suppported.")
+		log.Warn("Barbican not supported.")
 		barbican = nil
 	}
 
diff --git a/pkg/kms/encryption/aescbc/aescbc_test.go b/pkg/kms/encryption/aescbc/aescbc_test.go
index a64b7c355f..a9014f3ae4 100644
--- a/pkg/kms/encryption/aescbc/aescbc_test.go
+++ b/pkg/kms/encryption/aescbc/aescbc_test.go
@@ -9,7 +9,7 @@ import (
 
 var key []byte
 
 func init() {
-	// genereate key for encrypt decrypt operation
+	// generate key for encrypt/decrypt operations
 	genKey()
 }
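The aescbc test hunk references a `genKey` helper that prepares a key before the encrypt/decrypt tests run. For readers unfamiliar with the setup, a hedged sketch of what such key generation and a CBC encrypt step typically look like in Go (illustrative only, not the package's actual implementation):

```go
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"
)

// genKey draws a random 256-bit key, as a test-suite init plausibly does.
func genKey() ([]byte, error) {
	key := make([]byte, 32)
	_, err := io.ReadFull(rand.Reader, key)
	return key, err
}

func encrypt(key, plaintext []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	// PKCS#7: pad the plaintext to a whole number of AES blocks.
	pad := aes.BlockSize - len(plaintext)%aes.BlockSize
	plaintext = append(plaintext, bytes.Repeat([]byte{byte(pad)}, pad)...)
	// Prepend a random IV so the same plaintext never encrypts identically.
	out := make([]byte, aes.BlockSize+len(plaintext))
	iv := out[:aes.BlockSize]
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		return nil, err
	}
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(out[aes.BlockSize:], plaintext)
	return out, nil
}

func main() {
	key, _ := genKey()
	ct, _ := encrypt(key, []byte("example plaintext"))
	fmt.Printf("%x\n", ct)
}
```
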
diff --git a/pkg/openstack/loadbalancer.go b/pkg/openstack/loadbalancer.go
index d31e41085a..b701dfffe8 100644
--- a/pkg/openstack/loadbalancer.go
+++ b/pkg/openstack/loadbalancer.go
@@ -154,7 +154,7 @@ func andMatcher(a, b matcher) matcher {
 	}
 }
 
-// reexpNameMatcher creates a subnet matcher matching a subnet by name for a given regexp.
+// regexpNameMatcher creates a subnet matcher matching a subnet by name for a given regexp.
 func regexpNameMatcher(r *regexp.Regexp) matcher {
 	return func(s *subnets.Subnet) bool { return r.FindString(s.Name) == s.Name }
 }
@@ -800,7 +800,7 @@ func disassociateSecurityGroupForLB(network *gophercloud.ServiceClient, sg strin
 		return err
 	}
 
-	// Disassocate security group and remove the tag.
+	// Disassociate security group and remove the tag.
 	for _, port := range allPorts {
 		existingSGs := sets.NewString()
 		for _, sgID := range port.SecurityGroups {
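The corrected comment names the interesting detail of `regexpNameMatcher`: `r.FindString(s.Name) == s.Name` accepts a subnet only when the regexp's match consumes the whole name, so an unanchored pattern cannot match a mere prefix or substring. A small standalone demonstration:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	r := regexp.MustCompile(`sub-[0-9]+`)
	for _, name := range []string{"sub-42", "sub-42-private", "public"} {
		// FindString returns the leftmost match ("sub-42", "sub-42", "").
		// Comparing it against the full name rejects partial matches.
		fmt.Printf("%-16s matches: %v\n", name, r.FindString(name) == name)
	}
	// Prints: sub-42 true, sub-42-private false, public false
}
```
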
diff --git a/pkg/openstack/loadbalancer_test.go b/pkg/openstack/loadbalancer_test.go
index 58022027ef..115ea2f7ea 100644
--- a/pkg/openstack/loadbalancer_test.go
+++ b/pkg/openstack/loadbalancer_test.go
@@ -555,7 +555,7 @@ func TestLbaasV2_checkListenerPorts(t *testing.T) {
 		wantErr bool
 	}{
 		{
-			name: "error is not thrown if loadblanacer matches & if port is already in use by a lb",
+			name: "error is not thrown if loadbalancer matches & if port is already in use by a lb",
 			args: args{
 				service: &corev1.Service{
 					Spec: corev1.ServiceSpec{
diff --git a/pkg/openstack/openstack.go b/pkg/openstack/openstack.go
index 34d92eaf69..aee84846cd 100644
--- a/pkg/openstack/openstack.go
+++ b/pkg/openstack/openstack.go
@@ -122,7 +122,7 @@ type LoadBalancerOpts struct {
 	IngressHostnameSuffix string `gcfg:"ingress-hostname-suffix"` // Used with proxy protocol by adding a dns suffix to the load balancer IP address. Default nip.io.
 	MaxSharedLB int `gcfg:"max-shared-lb"` // Number of Services in maximum can share a single load balancer. Default 2
 	ContainerStore string `gcfg:"container-store"` // Used to specify the store of the tls-container-ref
-	ProviderRequiresSerialAPICalls bool `gcfg:"provider-requires-serial-api-calls"` // default false, the provider supportes the "bulk update" API call
+	ProviderRequiresSerialAPICalls bool `gcfg:"provider-requires-serial-api-calls"` // default false, the provider supports the "bulk update" API call
 	// revive:disable:var-naming
 	TlsContainerRef string `gcfg:"default-tls-container-ref"` // reference to a tls container
 	// revive:enable:var-naming
diff --git a/pkg/util/openstack/keymanager.go b/pkg/util/openstack/keymanager.go
index aba991827c..ce35ffd0a0 100644
--- a/pkg/util/openstack/keymanager.go
+++ b/pkg/util/openstack/keymanager.go
@@ -86,11 +86,11 @@ func CreateSecret(client *gophercloud.ServiceClient, name string, secretType str
 	return secret.SecretRef, nil
 }
 
-// ParseSecretID return secret ID from serectRef
+// ParseSecretID returns the secret ID from a secretRef
 func ParseSecretID(ref string) (string, error) {
 	parts := strings.Split(ref, "/")
 	if len(parts) < 2 {
-		return "", fmt.Errorf("Could not parse %s", ref)
+		return "", fmt.Errorf("could not parse %s", ref)
 	}
 
 	return parts[len(parts)-1], nil
diff --git a/pkg/util/openstack/loadbalancer.go b/pkg/util/openstack/loadbalancer.go
index 9cd445b3ba..3734f81858 100644
--- a/pkg/util/openstack/loadbalancer.go
+++ b/pkg/util/openstack/loadbalancer.go
@@ -504,7 +504,7 @@ func GetPoolByListener(client *gophercloud.ServiceClient, lbID, listenerID strin
 	return &listenerPools[0], nil
 }
 
-// GetPools retrives the pools belong to the loadbalancer.
+// GetPools retrieves the pools belonging to the loadbalancer.
 func GetPools(client *gophercloud.ServiceClient, lbID string) ([]pools.Pool, error) {
 	var lbPools []pools.Pool
 
diff --git a/tests/e2e/csi/manila/manilavolume.go b/tests/e2e/csi/manila/manilavolume.go
index d8783d1663..a322236565 100644
--- a/tests/e2e/csi/manila/manilavolume.go
+++ b/tests/e2e/csi/manila/manilavolume.go
@@ -29,7 +29,7 @@ func runCmd(name string, args ...string) ([]byte, error) {
 	return stdout.Bytes(), err
 }
 
-// It is assummed that the `openstack` and other related programs
+// It is assumed that the `openstack` and other related programs
 // are accessible from $PATH on the node.
 
 type manilaVolume struct {
diff --git a/tests/playbooks/roles/install-docker/tasks/main.yml b/tests/playbooks/roles/install-docker/tasks/main.yml
index 70b8e69233..a27921424f 100644
--- a/tests/playbooks/roles/install-docker/tasks/main.yml
+++ b/tests/playbooks/roles/install-docker/tasks/main.yml
@@ -14,7 +14,7 @@
     - curl
 
 # curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
-- name: Add Docker’s official GPG key
+- name: Add Docker's official GPG key
   apt_key:
     url: https://download.docker.com/linux/ubuntu/gpg
     state: present
diff --git a/tests/sanity/cinder/fakecloud.go b/tests/sanity/cinder/fakecloud.go
index e167ddca31..d1bbed949f 100644
--- a/tests/sanity/cinder/fakecloud.go
+++ b/tests/sanity/cinder/fakecloud.go
@@ -57,7 +57,7 @@ func (cloud *cloud) DeleteVolume(volumeID string) error {
 }
 
 func (cloud *cloud) AttachVolume(instanceID, volumeID string) (string, error) {
-	// update the volume with attachement
+	// update the volume with attachment
 	vol, ok := cloud.volumes[volumeID]
 
diff --git a/tools/csi-deps.sh b/tools/csi-deps.sh
index 2043942d69..d902ef62a8 100755
--- a/tools/csi-deps.sh
+++ b/tools/csi-deps.sh
@@ -49,7 +49,7 @@ copy_deps() {
 	fi
 }
 
-# Commmon lib /lib64/ld-linux-*.so.2
+# Common lib /lib64/ld-linux-*.so.2
 # needs for all utils
 ARCH=$(uname -m)
 if [ $ARCH = "aarch64" ] || [ $ARCH = "armv7l" ]; then
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
index 6330972de5..96610a9830 100755
--- a/tools/test-setup.sh
+++ b/tools/test-setup.sh
@@ -14,7 +14,7 @@ case $(uname -s) in
         if LSB_RELEASE=$(which lsb_release); then
             OS=$($LSB_RELEASE -s -c)
         else
-            # No lsb-release, trya hack or two
+            # No lsb-release, try a hack or two
            if which dpkg 1>/dev/null; then
                OS=debian
            elif which yum 1>/dev/null || which dnf 1>/dev/null; then