From 73b46ff7db55d5e497c6fb5607660c02ca5160f4 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sat, 3 Feb 2018 18:44:57 -0500 Subject: [PATCH] Fix golint for openstack and cinder packages --- hack/.golint_failures | 2 - .../providers/openstack/metadata.go | 31 ++-- .../providers/openstack/metadata_test.go | 6 +- .../providers/openstack/openstack.go | 123 ++++++++------- .../providers/openstack/openstack_client.go | 6 + .../openstack/openstack_instances.go | 9 +- .../openstack/openstack_loadbalancer.go | 53 +++---- .../providers/openstack/openstack_metrics.go | 24 +-- .../providers/openstack/openstack_routes.go | 43 +++--- .../openstack/openstack_routes_test.go | 2 +- .../providers/openstack/openstack_test.go | 111 +++++++------- .../providers/openstack/openstack_volumes.go | 142 +++++++++--------- pkg/volume/cinder/attacher.go | 21 ++- pkg/volume/cinder/attacher_test.go | 64 ++++---- pkg/volume/cinder/cinder.go | 31 ++-- pkg/volume/cinder/cinder_util.go | 23 +-- 16 files changed, 360 insertions(+), 331 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 5854aafcaec..31f894ec5b4 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -88,7 +88,6 @@ pkg/cloudprovider/providers/aws pkg/cloudprovider/providers/fake pkg/cloudprovider/providers/gce pkg/cloudprovider/providers/gce/cloud -pkg/cloudprovider/providers/openstack pkg/cloudprovider/providers/ovirt pkg/cloudprovider/providers/photon pkg/cloudprovider/providers/vsphere @@ -391,7 +390,6 @@ pkg/volume/aws_ebs pkg/volume/azure_dd pkg/volume/azure_file pkg/volume/cephfs -pkg/volume/cinder pkg/volume/configmap pkg/volume/empty_dir pkg/volume/fc diff --git a/pkg/cloudprovider/providers/openstack/metadata.go b/pkg/cloudprovider/providers/openstack/metadata.go index 8f6c92cabc6..a69b3d81763 100644 --- a/pkg/cloudprovider/providers/openstack/metadata.go +++ b/pkg/cloudprovider/providers/openstack/metadata.go @@ -33,12 +33,12 @@ import ( ) const ( - // metadataUrlTemplate allows building an OpenStack Metadata service URL. + // metadataURLTemplate allows building an OpenStack Metadata service URL. // It's a hardcoded IPv4 link-local address as documented in "OpenStack Cloud // Administrator Guide", chapter Compute - Networking with nova-network. //https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service defaultMetadataVersion = "2012-08-10" - metadataUrlTemplate = "http://169.254.169.254/openstack/%s/meta_data.json" + metadataURLTemplate = "http://169.254.169.254/openstack/%s/meta_data.json" // metadataID is used as an identifier on the metadata search order configuration. metadataID = "metadataService" @@ -53,10 +53,10 @@ const ( configDriveID = "configDrive" ) +// ErrBadMetadata is used to indicate a problem parsing data from metadata server var ErrBadMetadata = errors.New("invalid OpenStack metadata, got empty uuid") -// There are multiple device types. To keep it simple, we're using a single structure -// for all device metadata types. +// DeviceMetadata is a single/simplified data structure for all kinds of device metadata types. type DeviceMetadata struct { Type string `json:"type"` Bus string `json:"bus,omitempty"` @@ -65,10 +65,11 @@ type DeviceMetadata struct { // .. and other fields. } -// Assumes the "2012-08-10" meta_data.json format. -//https://docs.openstack.org/nova/latest/user/config-drive.html +// Metadata has the information fetched from OpenStack metadata service or +// config drives. Assumes the "2012-08-10" meta_data.json format. 
+// See http://docs.openstack.org/user-guide/cli_config_drive.html type Metadata struct { - Uuid string `json:"uuid"` + UUID string `json:"uuid"` Hostname string `json:"hostname"` AvailabilityZone string `json:"availability_zone"` Devices []DeviceMetadata `json:"devices,omitempty"` @@ -84,15 +85,15 @@ func parseMetadata(r io.Reader) (*Metadata, error) { return nil, err } - if metadata.Uuid == "" { + if metadata.UUID == "" { return nil, ErrBadMetadata } return &metadata, nil } -func getMetadataUrl(metadataVersion string) string { - return fmt.Sprintf(metadataUrlTemplate, metadataVersion) +func getMetadataURL(metadataVersion string) string { + return fmt.Sprintf(metadataURLTemplate, metadataVersion) } func getConfigDrivePath(metadataVersion string) string { @@ -147,16 +148,16 @@ func getMetadataFromConfigDrive(metadataVersion string) (*Metadata, error) { func getMetadataFromMetadataService(metadataVersion string) (*Metadata, error) { // Try to get JSON from metadata server. - metadataUrl := getMetadataUrl(metadataVersion) - glog.V(4).Infof("Attempting to fetch metadata from %s", metadataUrl) - resp, err := http.Get(metadataUrl) + metadataURL := getMetadataURL(metadataVersion) + glog.V(4).Infof("Attempting to fetch metadata from %s", metadataURL) + resp, err := http.Get(metadataURL) if err != nil { - return nil, fmt.Errorf("error fetching %s: %v", metadataUrl, err) + return nil, fmt.Errorf("error fetching %s: %v", metadataURL, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("unexpected status code when reading metadata from %s: %s", metadataUrl, resp.Status) + err = fmt.Errorf("unexpected status code when reading metadata from %s: %s", metadataURL, resp.Status) return nil, err } diff --git a/pkg/cloudprovider/providers/openstack/metadata_test.go b/pkg/cloudprovider/providers/openstack/metadata_test.go index 7d586b7443d..53bd9532b5f 100644 --- a/pkg/cloudprovider/providers/openstack/metadata_test.go +++ b/pkg/cloudprovider/providers/openstack/metadata_test.go @@ -22,7 +22,7 @@ import ( ) var FakeMetadata = Metadata{ - Uuid: "83679162-1378-4288-a2d4-70e13ec132aa", + UUID: "83679162-1378-4288-a2d4-70e13ec132aa", Hostname: "test", AvailabilityZone: "nova", } @@ -85,8 +85,8 @@ func TestParseMetadata(t *testing.T) { t.Errorf("incorrect hostname: %s", md.Hostname) } - if md.Uuid != "83679162-1378-4288-a2d4-70e13ec132aa" { - t.Errorf("incorrect uuid: %s", md.Uuid) + if md.UUID != "83679162-1378-4288-a2d4-70e13ec132aa" { + t.Errorf("incorrect uuid: %s", md.UUID) } if md.AvailabilityZone != "nova" { diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index c39d015b8b7..c4d252f3067 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ b/pkg/cloudprovider/providers/openstack/openstack.go @@ -49,20 +49,27 @@ import ( ) const ( + // ProviderName is the name of the openstack provider ProviderName = "openstack" - AvailabilityZone = "availability_zone" + availabilityZone = "availability_zone" defaultTimeOut = 60 * time.Second ) +// ErrNotFound is used to inform that the object is missing var ErrNotFound = errors.New("failed to find object") + +// ErrMultipleResults is used when we unexpectedly get back multiple results var ErrMultipleResults = errors.New("multiple results where only one expected") + +// ErrNoAddressFound is used when we cannot find an ip address for the host var ErrNoAddressFound = errors.New("no address found for host") -// encoding.TextUnmarshaler interface for 
time.Duration +// MyDuration is the encoding.TextUnmarshaler interface for time.Duration type MyDuration struct { time.Duration } +// UnmarshalText is used to convert from text to Duration func (d *MyDuration) UnmarshalText(text []byte) error { res, err := time.ParseDuration(string(text)) if err != nil { @@ -72,6 +79,7 @@ func (d *MyDuration) UnmarshalText(text []byte) error { return nil } +// LoadBalancer is used for creating and maintaining load balancers type LoadBalancer struct { network *gophercloud.ServiceClient compute *gophercloud.ServiceClient @@ -79,11 +87,12 @@ type LoadBalancer struct { opts LoadBalancerOpts } +// LoadBalancerOpts have the options to talk to Neutron LBaaSV2 or Octavia type LoadBalancerOpts struct { LBVersion string `gcfg:"lb-version"` // overrides autodetection. Only support v2. UseOctavia bool `gcfg:"use-octavia"` // uses Octavia V2 service catalog endpoint - SubnetId string `gcfg:"subnet-id"` // overrides autodetection. - FloatingNetworkId string `gcfg:"floating-network-id"` // If specified, will create floating ip for loadbalancer, or do not create floating ip. + SubnetID string `gcfg:"subnet-id"` // overrides autodetection. + FloatingNetworkID string `gcfg:"floating-network-id"` // If specified, will create floating ip for loadbalancer, or do not create floating ip. LBMethod string `gcfg:"lb-method"` // default to ROUND_ROBIN. LBProvider string `gcfg:"lb-provider"` CreateMonitor bool `gcfg:"create-monitor"` @@ -94,16 +103,19 @@ type LoadBalancerOpts struct { NodeSecurityGroupIDs []string // Do not specify, get it automatically when enable manage-security-groups. TODO(FengyunPan): move it into cache } +// BlockStorageOpts is used to talk to Cinder service type BlockStorageOpts struct { BSVersion string `gcfg:"bs-version"` // overrides autodetection. v1 or v2. 
Defaults to auto TrustDevicePath bool `gcfg:"trust-device-path"` // See Issue #33128 IgnoreVolumeAZ bool `gcfg:"ignore-volume-az"` } +// RouterOpts is used for Neutron routes type RouterOpts struct { - RouterId string `gcfg:"router-id"` // required + RouterID string `gcfg:"router-id"` // required } +// MetadataOpts is used for configuring how to talk to metadata service or config drive type MetadataOpts struct { SearchOrder string `gcfg:"search-order"` RequestTimeout MyDuration `gcfg:"request-timeout"` @@ -121,16 +133,17 @@ type OpenStack struct { localInstanceID string } +// Config is used to read and store information from the cloud configuration file type Config struct { Global struct { - AuthUrl string `gcfg:"auth-url"` + AuthURL string `gcfg:"auth-url"` Username string - UserId string `gcfg:"user-id"` + UserID string `gcfg:"user-id"` Password string - TenantId string `gcfg:"tenant-id"` + TenantID string `gcfg:"tenant-id"` TenantName string `gcfg:"tenant-name"` - TrustId string `gcfg:"trust-id"` - DomainId string `gcfg:"domain-id"` + TrustID string `gcfg:"trust-id"` + DomainID string `gcfg:"domain-id"` DomainName string `gcfg:"domain-name"` Region string CAFile string `gcfg:"ca-file"` @@ -142,7 +155,7 @@ type Config struct { } func init() { - RegisterMetrics() + registerMetrics() cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { cfg, err := readConfig(config) @@ -155,13 +168,13 @@ func init() { func (cfg Config) toAuthOptions() gophercloud.AuthOptions { return gophercloud.AuthOptions{ - IdentityEndpoint: cfg.Global.AuthUrl, + IdentityEndpoint: cfg.Global.AuthURL, Username: cfg.Global.Username, - UserID: cfg.Global.UserId, + UserID: cfg.Global.UserID, Password: cfg.Global.Password, - TenantID: cfg.Global.TenantId, + TenantID: cfg.Global.TenantID, TenantName: cfg.Global.TenantName, - DomainID: cfg.Global.DomainId, + DomainID: cfg.Global.DomainID, DomainName: cfg.Global.DomainName, // Persistent service, so we need to be able to renew tokens. @@ -171,11 +184,11 @@ func (cfg Config) toAuthOptions() gophercloud.AuthOptions { func (cfg Config) toAuth3Options() tokens3.AuthOptions { return tokens3.AuthOptions{ - IdentityEndpoint: cfg.Global.AuthUrl, + IdentityEndpoint: cfg.Global.AuthURL, Username: cfg.Global.Username, - UserID: cfg.Global.UserId, + UserID: cfg.Global.UserID, Password: cfg.Global.Password, - DomainID: cfg.Global.DomainId, + DomainID: cfg.Global.DomainID, DomainName: cfg.Global.DomainName, AllowReauth: true, } @@ -184,36 +197,38 @@ func (cfg Config) toAuth3Options() tokens3.AuthOptions { // configFromEnv allows setting up credentials etc using the // standard OS_* OpenStack client environment variables. 
func configFromEnv() (cfg Config, ok bool) { - cfg.Global.AuthUrl = os.Getenv("OS_AUTH_URL") + cfg.Global.AuthURL = os.Getenv("OS_AUTH_URL") cfg.Global.Username = os.Getenv("OS_USERNAME") cfg.Global.Password = os.Getenv("OS_PASSWORD") cfg.Global.Region = os.Getenv("OS_REGION_NAME") - cfg.Global.UserId = os.Getenv("OS_USER_ID") - cfg.Global.TrustId = os.Getenv("OS_TRUST_ID") + cfg.Global.UserID = os.Getenv("OS_USER_ID") + cfg.Global.TrustID = os.Getenv("OS_TRUST_ID") - cfg.Global.TenantId = os.Getenv("OS_TENANT_ID") - if cfg.Global.TenantId == "" { - cfg.Global.TenantId = os.Getenv("OS_PROJECT_ID") + cfg.Global.TenantID = os.Getenv("OS_TENANT_ID") + if cfg.Global.TenantID == "" { + cfg.Global.TenantID = os.Getenv("OS_PROJECT_ID") } cfg.Global.TenantName = os.Getenv("OS_TENANT_NAME") if cfg.Global.TenantName == "" { cfg.Global.TenantName = os.Getenv("OS_PROJECT_NAME") } - cfg.Global.DomainId = os.Getenv("OS_DOMAIN_ID") - if cfg.Global.DomainId == "" { - cfg.Global.DomainId = os.Getenv("OS_USER_DOMAIN_ID") + cfg.Global.DomainID = os.Getenv("OS_DOMAIN_ID") + if cfg.Global.DomainID == "" { + cfg.Global.DomainID = os.Getenv("OS_USER_DOMAIN_ID") } cfg.Global.DomainName = os.Getenv("OS_DOMAIN_NAME") if cfg.Global.DomainName == "" { cfg.Global.DomainName = os.Getenv("OS_USER_DOMAIN_NAME") } - ok = cfg.Global.AuthUrl != "" && + ok = cfg.Global.AuthURL != "" && cfg.Global.Username != "" && cfg.Global.Password != "" && - (cfg.Global.TenantId != "" || cfg.Global.TenantName != "" || - cfg.Global.DomainId != "" || cfg.Global.DomainName != "" || cfg.Global.Region != "" || cfg.Global.UserId != "" || cfg.Global.TrustId != "") + (cfg.Global.TenantID != "" || cfg.Global.TenantName != "" || + cfg.Global.DomainID != "" || cfg.Global.DomainName != "" || + cfg.Global.Region != "" || cfg.Global.UserID != "" || + cfg.Global.TrustID != "") cfg.Metadata.SearchOrder = fmt.Sprintf("%s,%s", configDriveID, metadataID) cfg.BlockStorage.BSVersion = "auto" @@ -238,13 +253,13 @@ func readConfig(config io.Reader) (Config, error) { return cfg, err } -// Tiny helper for conditional unwind logic -type Caller bool +// caller is a tiny helper for conditional unwind logic +type caller bool -func NewCaller() Caller { return Caller(true) } -func (c *Caller) Disarm() { *c = false } +func newCaller() caller { return caller(true) } +func (c *caller) disarm() { *c = false } -func (c *Caller) Call(f func()) { +func (c *caller) call(f func()) { if *c { f() } @@ -269,7 +284,7 @@ func readInstanceID(searchOrder string) (string, error) { return "", err } - return md.Uuid, nil + return md.UUID, nil } // check opts for OpenStack @@ -290,16 +305,11 @@ func checkOpenStackOpts(openstackOpts *OpenStack) error { return fmt.Errorf("monitor-max-retries not set in cloud provider config") } } - - if err := checkMetadataSearchOrder(openstackOpts.metadataOpts.SearchOrder); err != nil { - return err - } - - return nil + return checkMetadataSearchOrder(openstackOpts.metadataOpts.SearchOrder) } func newOpenStack(cfg Config) (*OpenStack, error) { - provider, err := openstack.NewClient(cfg.Global.AuthUrl) + provider, err := openstack.NewClient(cfg.Global.AuthURL) if err != nil { return nil, err } @@ -313,10 +323,10 @@ func newOpenStack(cfg Config) (*OpenStack, error) { provider.HTTPClient.Transport = netutil.SetOldTransportDefaults(&http.Transport{TLSClientConfig: config}) } - if cfg.Global.TrustId != "" { + if cfg.Global.TrustID != "" { opts := cfg.toAuth3Options() authOptsExt := trusts.AuthOptsExt{ - TrustID: cfg.Global.TrustId, + TrustID: 
cfg.Global.TrustID, AuthOptionsBuilder: &opts, } err = openstack.AuthenticateV3(provider, authOptsExt, gophercloud.EndpointOpts{}) @@ -360,7 +370,7 @@ func mapNodeNameToServerName(nodeName types.NodeName) string { return string(nodeName) } -// getNodeNameByID maps instanceid to types.NodeName +// GetNodeNameByID maps instanceid to types.NodeName func (os *OpenStack) GetNodeNameByID(instanceID string) (types.NodeName, error) { client, err := os.NewComputeV2() var nodeName types.NodeName @@ -441,7 +451,7 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) { addrs := []v1.NodeAddress{} type Address struct { - IpType string `mapstructure:"OS-EXT-IPS:type"` + IPType string `mapstructure:"OS-EXT-IPS:type"` Addr string } @@ -454,7 +464,7 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) { for network, addrList := range addresses { for _, props := range addrList { var addressType v1.NodeAddressType - if props.IpType == "floating" || network == "public" { + if props.IPType == "floating" || network == "public" { addressType = v1.NodeExternalIP } else { addressType = v1.NodeInternalIP @@ -537,6 +547,7 @@ func getAttachedInterfacesByID(client *gophercloud.ServiceClient, serviceID stri return interfaces, nil } +// Clusters is a no-op func (os *OpenStack) Clusters() (cloudprovider.Clusters, bool) { return nil, false } @@ -551,6 +562,7 @@ func (os *OpenStack) HasClusterID() bool { return true } +// LoadBalancer initializes a LbaasV2 object func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) { glog.V(4).Info("openstack.LoadBalancer() called") @@ -587,11 +599,13 @@ func isNotFound(err error) bool { return ok && e.Actual == http.StatusNotFound } +// Zones indicates that we support zones func (os *OpenStack) Zones() (cloudprovider.Zones, bool) { glog.V(1).Info("Claiming to support Zones") return os, true } +// GetZone returns the current zone func (os *OpenStack) GetZone() (cloudprovider.Zone, error) { md, err := getMetadata(os.metadataOpts.SearchOrder) if err != nil { @@ -626,7 +640,7 @@ func (os *OpenStack) GetZoneByProviderID(providerID string) (cloudprovider.Zone, } zone := cloudprovider.Zone{ - FailureDomain: srv.Metadata[AvailabilityZone], + FailureDomain: srv.Metadata[availabilityZone], Region: os.region, } glog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) @@ -651,13 +665,14 @@ func (os *OpenStack) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Z } zone := cloudprovider.Zone{ - FailureDomain: srv.Metadata[AvailabilityZone], + FailureDomain: srv.Metadata[availabilityZone], Region: os.region, } glog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) return zone, nil } +// Routes initializes routes support func (os *OpenStack) Routes() (cloudprovider.Routes, bool) { glog.V(4).Info("openstack.Routes() called") @@ -742,12 +757,12 @@ func (os *OpenStack) volumeService(forceVersion string) (volumeService, error) { return &VolumesV1{sClient, os.bsOpts}, nil } - err_txt := "BlockStorage API version autodetection failed. " + + errTxt := "BlockStorage API version autodetection failed. 
" + "Please set it explicitly in cloud.conf in section [BlockStorage] with key `bs-version`" - return nil, errors.New(err_txt) + return nil, errors.New(errTxt) default: - err_txt := fmt.Sprintf("Config error: unrecognised bs-version \"%v\"", os.bsOpts.BSVersion) - return nil, errors.New(err_txt) + errTxt := fmt.Sprintf("Config error: unrecognised bs-version \"%v\"", os.bsOpts.BSVersion) + return nil, errors.New(errTxt) } } diff --git a/pkg/cloudprovider/providers/openstack/openstack_client.go b/pkg/cloudprovider/providers/openstack/openstack_client.go index 04851070e4d..2eb2c24ddc1 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_client.go +++ b/pkg/cloudprovider/providers/openstack/openstack_client.go @@ -23,6 +23,7 @@ import ( "github.com/gophercloud/gophercloud/openstack" ) +// NewNetworkV2 creates a ServiceClient that may be used with the neutron v2 API func (os *OpenStack) NewNetworkV2() (*gophercloud.ServiceClient, error) { network, err := openstack.NewNetworkV2(os.provider, gophercloud.EndpointOpts{ Region: os.region, @@ -33,6 +34,7 @@ func (os *OpenStack) NewNetworkV2() (*gophercloud.ServiceClient, error) { return network, nil } +// NewComputeV2 creates a ServiceClient that may be used with the nova v2 API func (os *OpenStack) NewComputeV2() (*gophercloud.ServiceClient, error) { compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{ Region: os.region, @@ -43,6 +45,7 @@ func (os *OpenStack) NewComputeV2() (*gophercloud.ServiceClient, error) { return compute, nil } +// NewBlockStorageV1 creates a ServiceClient that may be used with the Cinder v1 API func (os *OpenStack) NewBlockStorageV1() (*gophercloud.ServiceClient, error) { storage, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{ Region: os.region, @@ -53,6 +56,7 @@ func (os *OpenStack) NewBlockStorageV1() (*gophercloud.ServiceClient, error) { return storage, nil } +// NewBlockStorageV2 creates a ServiceClient that may be used with the Cinder v2 API func (os *OpenStack) NewBlockStorageV2() (*gophercloud.ServiceClient, error) { storage, err := openstack.NewBlockStorageV2(os.provider, gophercloud.EndpointOpts{ Region: os.region, @@ -63,6 +67,7 @@ func (os *OpenStack) NewBlockStorageV2() (*gophercloud.ServiceClient, error) { return storage, nil } +// NewBlockStorageV3 creates a ServiceClient that may be used with the Cinder v3 API func (os *OpenStack) NewBlockStorageV3() (*gophercloud.ServiceClient, error) { storage, err := openstack.NewBlockStorageV3(os.provider, gophercloud.EndpointOpts{ Region: os.region, @@ -73,6 +78,7 @@ func (os *OpenStack) NewBlockStorageV3() (*gophercloud.ServiceClient, error) { return storage, nil } +// NewLoadBalancerV2 creates a ServiceClient that may be used with the Neutron LBaaS v2 API func (os *OpenStack) NewLoadBalancerV2() (*gophercloud.ServiceClient, error) { var lb *gophercloud.ServiceClient var err error diff --git a/pkg/cloudprovider/providers/openstack/openstack_instances.go b/pkg/cloudprovider/providers/openstack/openstack_instances.go index 51d177a638d..407da429413 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_instances.go +++ b/pkg/cloudprovider/providers/openstack/openstack_instances.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider" ) +// Instances encapsulates an implementation of Instances for OpenStack. 
type Instances struct { compute *gophercloud.ServiceClient opts MetadataOpts @@ -51,7 +52,7 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) { }, true } -// Implementation of Instances.CurrentNodeName +// CurrentNodeName implements Instances.CurrentNodeName // Note this is *not* necessarily the same as hostname. func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) { md, err := getMetadata(i.opts.SearchOrder) @@ -61,10 +62,12 @@ func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) { return types.NodeName(md.Hostname), nil } +// AddSSHKeyToAllInstances is not implemented for OpenStack func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error { return cloudprovider.NotImplemented } +// NodeAddresses implements Instances.NodeAddresses func (i *Instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { glog.V(4).Infof("NodeAddresses(%v) called", name) @@ -212,9 +215,9 @@ func srvInstanceType(srv *servers.Server) (string, error) { // See cloudprovider.GetInstanceProviderID and Instances.InstanceID. func instanceIDFromProviderID(providerID string) (instanceID string, err error) { // If Instances.InstanceID or cloudprovider.GetInstanceProviderID is changed, the regexp should be changed too. - var providerIdRegexp = regexp.MustCompile(`^` + ProviderName + `:///([^/]+)$`) + var providerIDRegexp = regexp.MustCompile(`^` + ProviderName + `:///([^/]+)$`) - matches := providerIdRegexp.FindStringSubmatch(providerID) + matches := providerIDRegexp.FindStringSubmatch(providerID) if len(matches) != 2 { return "", fmt.Errorf("ProviderID \"%s\" didn't match expected format \"openstack:///InstanceID\"", providerID) } diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 164d222c126..47973eb29f5 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -68,7 +68,7 @@ const ( activeStatus = "ACTIVE" errorStatus = "ERROR" - ServiceAnnotationLoadBalancerFloatingNetworkId = "loadbalancer.openstack.org/floating-network-id" + ServiceAnnotationLoadBalancerFloatingNetworkID = "loadbalancer.openstack.org/floating-network-id" // ServiceAnnotationLoadBalancerInternal is the annotation used on the service // to indicate that we want an internal loadbalancer service. 
@@ -76,7 +76,7 @@ const ( ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/openstack-internal-load-balancer" ) -// LoadBalancer implementation for LBaaS v2 +// LbaasV2 is a LoadBalancer implementation for Neutron LBaaS v2 API type LbaasV2 struct { LoadBalancer } @@ -368,12 +368,10 @@ func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID s if err != nil { if err == ErrNotFound { return true, nil - } else { - return false, err } - } else { - return false, nil + return false, err } + return false, nil }) if err == wait.ErrWaitTimeout { @@ -442,7 +440,7 @@ func (lbaas *LbaasV2) createLoadBalancer(service *v1.Service, name string, inter createOpts := loadbalancers.CreateOpts{ Name: name, Description: fmt.Sprintf("Kubernetes external service %s", name), - VipSubnetID: lbaas.opts.SubnetId, + VipSubnetID: lbaas.opts.SubnetID, Provider: lbaas.opts.LBProvider, } @@ -458,6 +456,7 @@ func (lbaas *LbaasV2) createLoadBalancer(service *v1.Service, name string, inter return loadbalancer, nil } +// GetLoadBalancer returns whether the specified load balancer exists and its status func (lbaas *LbaasV2) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { loadBalancerName := cloudprovider.GetLoadBalancerName(service) loadbalancer, err := getLoadbalancerByName(lbaas.lb, loadBalancerName) @@ -485,7 +484,7 @@ func (lbaas *LbaasV2) GetLoadBalancer(clusterName string, service *v1.Service) ( } // The LB needs to be configured with instance addresses on the same -// subnet as the LB (aka opts.SubnetId). Currently we're just +// subnet as the LB (aka opts.SubnetID). Currently we're just // guessing that the node's InternalIP is the right address - and that // should be sufficient for all "normal" cases. func nodeAddressForLB(node *v1.Node) (string, error) { @@ -584,8 +583,8 @@ func isSecurityGroupNotFound(err error) bool { return false } -// getFloatingNetworkIdForLB returns a floating-network-id for cluster. -func getFloatingNetworkIdForLB(client *gophercloud.ServiceClient) (string, error) { +// getFloatingNetworkIDForLB returns a floating-network-id for cluster. +func getFloatingNetworkIDForLB(client *gophercloud.ServiceClient) (string, error) { var floatingNetworkIds []string type NetworkWithExternalExt struct { @@ -635,6 +634,7 @@ func getFloatingNetworkIdForLB(client *gophercloud.ServiceClient) (string, error // a list of regions (from config) and query/create loadbalancers in // each region. +// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, apiService.Annotations) @@ -642,16 +642,16 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv return nil, fmt.Errorf("there are no available nodes for LoadBalancer service %s/%s", apiService.Namespace, apiService.Name) } - if len(lbaas.opts.SubnetId) == 0 { - // Get SubnetId automatically. - // The LB needs to be configured with instance addresses on the same subnet, so get SubnetId by one node. + if len(lbaas.opts.SubnetID) == 0 { + // Get SubnetID automatically. + // The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node. 
subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0]) if err != nil { glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) return nil, fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ "and failed to find subnet-id from OpenStack: %v", apiService.Namespace, apiService.Name, err) } - lbaas.opts.SubnetId = subnetID + lbaas.opts.SubnetID = subnetID } ports := apiService.Spec.Ports @@ -659,10 +659,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv return nil, fmt.Errorf("no ports provided to openstack load balancer") } - floatingPool := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerFloatingNetworkId, lbaas.opts.FloatingNetworkId) + floatingPool := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerFloatingNetworkID, lbaas.opts.FloatingNetworkID) if len(floatingPool) == 0 { var err error - floatingPool, err = getFloatingNetworkIdForLB(lbaas.network) + floatingPool, err = getFloatingNetworkIDForLB(lbaas.network) if err != nil { glog.Warningf("Failed to find floating-network-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) } @@ -816,7 +816,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv _, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{ ProtocolPort: int(port.NodePort), Address: addr, - SubnetID: lbaas.opts.SubnetId, + SubnetID: lbaas.opts.SubnetID, }).Extract() if err != nil { return nil, fmt.Errorf("error creating LB pool member for node: %s, %v", node.Name, err) @@ -1111,8 +1111,8 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser // update loadbalancer vip port if !found { port.SecurityGroups = append(port.SecurityGroups, lbSecGroup.ID) - update_opts := neutronports.UpdateOpts{SecurityGroups: &port.SecurityGroups} - res := neutronports.Update(lbaas.network, portID, update_opts) + updateOpts := neutronports.UpdateOpts{SecurityGroups: &port.SecurityGroups} + res := neutronports.Update(lbaas.network, portID, updateOpts) if res.Err != nil { msg := fmt.Sprintf("Error occured updating port %s for loadbalancer service %s/%s: %v", portID, apiService.Namespace, apiService.Name, res.Err) return fmt.Errorf(msg) @@ -1152,20 +1152,21 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser return nil } +// UpdateLoadBalancer updates hosts under the specified load balancer. func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error { loadBalancerName := cloudprovider.GetLoadBalancerName(service) glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes) - if len(lbaas.opts.SubnetId) == 0 && len(nodes) > 0 { - // Get SubnetId automatically. - // The LB needs to be configured with instance addresses on the same subnet, so get SubnetId by one node. + if len(lbaas.opts.SubnetID) == 0 && len(nodes) > 0 { + // Get SubnetID automatically. + // The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node. 
subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0]) if err != nil { glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err) return fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ "and failed to find subnet-id from OpenStack: %v", service.Namespace, service.Name, err) } - lbaas.opts.SubnetId = subnetID + lbaas.opts.SubnetID = subnetID } ports := service.Spec.Ports @@ -1254,7 +1255,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service _, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{ Address: addr, ProtocolPort: int(port.NodePort), - SubnetID: lbaas.opts.SubnetId, + SubnetID: lbaas.opts.SubnetID, }).Extract() if err != nil { return err @@ -1372,6 +1373,7 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Ser return nil } +// EnsureLoadBalancerDeleted deletes the specified load balancer func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error { loadBalancerName := cloudprovider.GetLoadBalancerName(service) glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v)", clusterName, loadBalancerName) @@ -1512,9 +1514,8 @@ func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1 if isSecurityGroupNotFound(err) { // It is OK when the security group has been deleted by others. return nil - } else { - return fmt.Errorf("Error occurred finding security group: %s: %v", lbSecGroupName, err) } + return fmt.Errorf("Error occurred finding security group: %s: %v", lbSecGroupName, err) } lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID) diff --git a/pkg/cloudprovider/providers/openstack/openstack_metrics.go b/pkg/cloudprovider/providers/openstack/openstack_metrics.go index 75dc398cb5b..ab8f9b68707 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_metrics.go +++ b/pkg/cloudprovider/providers/openstack/openstack_metrics.go @@ -19,32 +19,32 @@ package openstack import "github.com/prometheus/client_golang/prometheus" const ( - OpenstackSubsystem = "openstack" - OpenstackOperationKey = "cloudprovider_openstack_api_request_duration_seconds" - OpenstackOperationErrorKey = "cloudprovider_openstack_api_request_errors" + openstackSubsystem = "openstack" + openstackOperationKey = "cloudprovider_openstack_api_request_duration_seconds" + openstackOperationErrorKey = "cloudprovider_openstack_api_request_errors" ) var ( - OpenstackOperationsLatency = prometheus.NewHistogramVec( + openstackOperationsLatency = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Subsystem: OpenstackSubsystem, - Name: OpenstackOperationKey, + Subsystem: openstackSubsystem, + Name: openstackOperationKey, Help: "Latency of openstack api call", }, []string{"request"}, ) - OpenstackApiRequestErrors = prometheus.NewCounterVec( + openstackAPIRequestErrors = prometheus.NewCounterVec( prometheus.CounterOpts{ - Subsystem: OpenstackSubsystem, - Name: OpenstackOperationErrorKey, + Subsystem: openstackSubsystem, + Name: openstackOperationErrorKey, Help: "Cumulative number of openstack Api call errors", }, []string{"request"}, ) ) -func RegisterMetrics() { - prometheus.MustRegister(OpenstackOperationsLatency) - prometheus.MustRegister(OpenstackApiRequestErrors) +func registerMetrics() { + prometheus.MustRegister(openstackOperationsLatency) + prometheus.MustRegister(openstackAPIRequestErrors) } diff --git a/pkg/cloudprovider/providers/openstack/openstack_routes.go 
b/pkg/cloudprovider/providers/openstack/openstack_routes.go index bc24b571e83..49d27b51902 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_routes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_routes.go @@ -29,17 +29,19 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider" ) -var ErrNoRouterId = errors.New("router-id not set in cloud provider config") +var errNoRouterID = errors.New("router-id not set in cloud provider config") +// Routes implements the cloudprovider.Routes for OpenStack clouds type Routes struct { compute *gophercloud.ServiceClient network *gophercloud.ServiceClient opts RouterOpts } +// NewRoutes creates a new instance of Routes func NewRoutes(compute *gophercloud.ServiceClient, network *gophercloud.ServiceClient, opts RouterOpts) (cloudprovider.Routes, error) { - if opts.RouterId == "" { - return nil, ErrNoRouterId + if opts.RouterID == "" { + return nil, errNoRouterID } return &Routes{ @@ -49,6 +51,7 @@ func NewRoutes(compute *gophercloud.ServiceClient, network *gophercloud.ServiceC }, nil } +// ListRoutes lists all managed routes that belong to the specified clusterName func (r *Routes) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) { glog.V(4).Infof("ListRoutes(%v)", clusterName) @@ -70,7 +73,7 @@ func (r *Routes) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) return nil, err } - router, err := routers.Get(r.network, r.opts.RouterId).Extract() + router, err := routers.Get(r.network, r.opts.RouterID).Extract() if err != nil { return nil, err } @@ -136,10 +139,11 @@ func updateAllowedAddressPairs(network *gophercloud.ServiceClient, port *neutron return unwinder, nil } +// CreateRoute creates the described managed route func (r *Routes) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error { glog.V(4).Infof("CreateRoute(%v, %v, %v)", clusterName, nameHint, route) - onFailure := NewCaller() + onFailure := newCaller() addr, err := getAddressByName(r.compute, route.TargetNode) if err != nil { @@ -148,7 +152,7 @@ func (r *Routes) CreateRoute(clusterName string, nameHint string, route *cloudpr glog.V(4).Infof("Using nexthop %v for node %v", addr, route.TargetNode) - router, err := routers.Get(r.network, r.opts.RouterId).Extract() + router, err := routers.Get(r.network, r.opts.RouterID).Extract() if err != nil { return err } @@ -171,7 +175,7 @@ func (r *Routes) CreateRoute(clusterName string, nameHint string, route *cloudpr if err != nil { return err } - defer onFailure.Call(unwind) + defer onFailure.call(unwind) // get the port of addr on target node. 
portID, err := getPortIDByIP(r.compute, route.TargetNode, addr) @@ -200,25 +204,26 @@ func (r *Routes) CreateRoute(clusterName string, nameHint string, route *cloudpr if err != nil { return err } - defer onFailure.Call(unwind) + defer onFailure.call(unwind) } glog.V(4).Infof("Route created: %v", route) - onFailure.Disarm() + onFailure.disarm() return nil } +// DeleteRoute deletes the specified managed route func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) error { glog.V(4).Infof("DeleteRoute(%v, %v)", clusterName, route) - onFailure := NewCaller() + onFailure := newCaller() addr, err := getAddressByName(r.compute, route.TargetNode) if err != nil { return err } - router, err := routers.Get(r.network, r.opts.RouterId).Extract() + router, err := routers.Get(r.network, r.opts.RouterID).Extract() if err != nil { return err } @@ -245,7 +250,7 @@ func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) err if err != nil { return err } - defer onFailure.Call(unwind) + defer onFailure.call(unwind) // get the port of addr on target node. portID, err := getPortIDByIP(r.compute, route.TargetNode, addr) @@ -257,9 +262,9 @@ func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) err return err } - addr_pairs := port.AllowedAddressPairs + addrPairs := port.AllowedAddressPairs index = -1 - for i, item := range addr_pairs { + for i, item := range addrPairs { if item.IPAddress == route.DestinationCIDR { index = i break @@ -268,18 +273,18 @@ func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) err if index != -1 { // Delete element `index` - addr_pairs[index] = addr_pairs[len(addr_pairs)-1] - addr_pairs = addr_pairs[:len(addr_pairs)-1] + addrPairs[index] = addrPairs[len(addrPairs)-1] + addrPairs = addrPairs[:len(addrPairs)-1] - unwind, err := updateAllowedAddressPairs(r.network, port, addr_pairs) + unwind, err := updateAllowedAddressPairs(r.network, port, addrPairs) if err != nil { return err } - defer onFailure.Call(unwind) + defer onFailure.call(unwind) } glog.V(4).Infof("Route deleted: %v", route) - onFailure.Disarm() + onFailure.disarm() return nil } diff --git a/pkg/cloudprovider/providers/openstack/openstack_routes_test.go b/pkg/cloudprovider/providers/openstack/openstack_routes_test.go index 7b44c7945a9..a299f760771 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_routes_test.go +++ b/pkg/cloudprovider/providers/openstack/openstack_routes_test.go @@ -40,7 +40,7 @@ func TestRoutes(t *testing.T) { } // Pick the first router and server to try a test with - os.routeOpts.RouterId = getRouters(os)[0].ID + os.routeOpts.RouterID = getRouters(os)[0].ID servername := getServers(os)[0].Name r, ok := os.Routes() diff --git a/pkg/cloudprovider/providers/openstack/openstack_test.go b/pkg/cloudprovider/providers/openstack/openstack_test.go index 88618245a95..bddfe204a71 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_test.go +++ b/pkg/cloudprovider/providers/openstack/openstack_test.go @@ -37,9 +37,7 @@ import ( ) const ( - volumeAvailableStatus = "available" - volumeInUseStatus = "in-use" - testClusterName = "testCluster" + testClusterName = "testCluster" volumeStatusTimeoutSeconds = 30 // volumeStatus* is configuration of exponential backoff for @@ -68,9 +66,8 @@ func WaitForVolumeStatus(t *testing.T, os *OpenStack, volumeName string, status status, volumeStatusTimeoutSeconds) return true, nil - } else { - return false, nil } + return false, nil }) if err == wait.ErrWaitTimeout { t.Logf("Volume 
(%s) status did not change to %s after %v seconds\n", @@ -116,12 +113,12 @@ func TestReadConfig(t *testing.T) { if err != nil { t.Fatalf("Should succeed when a valid config is provided: %s", err) } - if cfg.Global.AuthUrl != "http://auth.url" { - t.Errorf("incorrect authurl: %s", cfg.Global.AuthUrl) + if cfg.Global.AuthURL != "http://auth.url" { + t.Errorf("incorrect authurl: %s", cfg.Global.AuthURL) } - if cfg.Global.UserId != "user" { - t.Errorf("incorrect userid: %s", cfg.Global.UserId) + if cfg.Global.UserID != "user" { + t.Errorf("incorrect userid: %s", cfg.Global.UserID) } if cfg.Global.Password != "mypass" { @@ -163,10 +160,10 @@ func TestToAuthOptions(t *testing.T) { cfg := Config{} cfg.Global.Username = "user" cfg.Global.Password = "pass" - cfg.Global.DomainId = "2a73b8f597c04551a0fdc8e95544be8a" + cfg.Global.DomainID = "2a73b8f597c04551a0fdc8e95544be8a" cfg.Global.DomainName = "local" - cfg.Global.AuthUrl = "http://auth.url" - cfg.Global.UserId = "user" + cfg.Global.AuthURL = "http://auth.url" + cfg.Global.UserID = "user" ao := cfg.toAuthOptions() @@ -179,20 +176,20 @@ func TestToAuthOptions(t *testing.T) { if ao.Password != cfg.Global.Password { t.Errorf("Password %s != %s", ao.Password, cfg.Global.Password) } - if ao.DomainID != cfg.Global.DomainId { - t.Errorf("DomainID %s != %s", ao.DomainID, cfg.Global.DomainId) + if ao.DomainID != cfg.Global.DomainID { + t.Errorf("DomainID %s != %s", ao.DomainID, cfg.Global.DomainID) } - if ao.IdentityEndpoint != cfg.Global.AuthUrl { - t.Errorf("IdentityEndpoint %s != %s", ao.IdentityEndpoint, cfg.Global.AuthUrl) + if ao.IdentityEndpoint != cfg.Global.AuthURL { + t.Errorf("IdentityEndpoint %s != %s", ao.IdentityEndpoint, cfg.Global.AuthURL) } - if ao.UserID != cfg.Global.UserId { - t.Errorf("UserID %s != %s", ao.UserID, cfg.Global.UserId) + if ao.UserID != cfg.Global.UserID { + t.Errorf("UserID %s != %s", ao.UserID, cfg.Global.UserID) } if ao.DomainName != cfg.Global.DomainName { t.Errorf("DomainName %s != %s", ao.DomainName, cfg.Global.DomainName) } - if ao.TenantID != cfg.Global.TenantId { - t.Errorf("TenantID %s != %s", ao.TenantID, cfg.Global.TenantId) + if ao.TenantID != cfg.Global.TenantID { + t.Errorf("TenantID %s != %s", ao.TenantID, cfg.Global.TenantID) } } @@ -210,8 +207,8 @@ func TestCheckOpenStackOpts(t *testing.T) { provider: nil, lbOpts: LoadBalancerOpts{ LBVersion: "v2", - SubnetId: "6261548e-ffde-4bc7-bd22-59c83578c5ef", - FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786", + SubnetID: "6261548e-ffde-4bc7-bd22-59c83578c5ef", + FloatingNetworkID: "38b8b5f9-64dc-4424-bf86-679595714786", LBMethod: "ROUND_ROBIN", LBProvider: "haproxy", CreateMonitor: true, @@ -232,7 +229,7 @@ func TestCheckOpenStackOpts(t *testing.T) { provider: nil, lbOpts: LoadBalancerOpts{ LBVersion: "v2", - FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786", + FloatingNetworkID: "38b8b5f9-64dc-4424-bf86-679595714786", LBMethod: "ROUND_ROBIN", CreateMonitor: true, MonitorDelay: delay, @@ -252,8 +249,8 @@ func TestCheckOpenStackOpts(t *testing.T) { provider: nil, lbOpts: LoadBalancerOpts{ LBVersion: "v2", - SubnetId: "6261548e-ffde-4bc7-bd22-59c83578c5ef", - FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786", + SubnetID: "6261548e-ffde-4bc7-bd22-59c83578c5ef", + FloatingNetworkID: "38b8b5f9-64dc-4424-bf86-679595714786", LBMethod: "ROUND_ROBIN", CreateMonitor: true, MonitorTimeout: timeout, @@ -303,8 +300,8 @@ func TestCheckOpenStackOpts(t *testing.T) { provider: nil, lbOpts: LoadBalancerOpts{ LBVersion: "v2", - SubnetId: 
"6261548e-ffde-4bc7-bd22-59c83578c5ef", - FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786", + SubnetID: "6261548e-ffde-4bc7-bd22-59c83578c5ef", + FloatingNetworkID: "38b8b5f9-64dc-4424-bf86-679595714786", LBMethod: "ROUND_ROBIN", CreateMonitor: true, MonitorDelay: delay, @@ -323,8 +320,8 @@ func TestCheckOpenStackOpts(t *testing.T) { provider: nil, lbOpts: LoadBalancerOpts{ LBVersion: "v2", - SubnetId: "6261548e-ffde-4bc7-bd22-59c83578c5ef", - FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786", + SubnetID: "6261548e-ffde-4bc7-bd22-59c83578c5ef", + FloatingNetworkID: "38b8b5f9-64dc-4424-bf86-679595714786", LBMethod: "ROUND_ROBIN", CreateMonitor: true, MonitorDelay: delay, @@ -356,39 +353,39 @@ func TestCaller(t *testing.T) { called := false myFunc := func() { called = true } - c := NewCaller() - c.Call(myFunc) + c := newCaller() + c.call(myFunc) if !called { - t.Errorf("Caller failed to call function in default case") + t.Errorf("caller failed to call function in default case") } - c.Disarm() + c.disarm() called = false - c.Call(myFunc) + c.call(myFunc) if called { - t.Error("Caller still called function when disarmed") + t.Error("caller still called function when disarmed") } - // Confirm the "usual" deferred Caller pattern works as expected + // Confirm the "usual" deferred caller pattern works as expected called = false - success_case := func() { - c := NewCaller() - defer c.Call(func() { called = true }) - c.Disarm() + successCase := func() { + c := newCaller() + defer c.call(func() { called = true }) + c.disarm() } - if success_case(); called { + if successCase(); called { t.Error("Deferred success case still invoked unwind") } called = false - failure_case := func() { - c := NewCaller() - defer c.Call(func() { called = true }) + failureCase := func() { + c := newCaller() + defer c.call(func() { called = true }) } - if failure_case(); !called { + if failureCase(); !called { t.Error("Deferred failure case failed to invoke unwind") } } @@ -563,15 +560,15 @@ func TestVolumes(t *testing.T) { if err != nil { t.Logf("Cannot find instance id: %v - perhaps you are running this test outside a VM launched by OpenStack", err) } else { - diskId, err := os.AttachDisk(id, vol) + diskID, err := os.AttachDisk(id, vol) if err != nil { t.Fatalf("Cannot AttachDisk Cinder volume %s: %v", vol, err) } - t.Logf("Volume (%s) attached, disk ID: %s\n", vol, diskId) + t.Logf("Volume (%s) attached, disk ID: %s\n", vol, diskID) WaitForVolumeStatus(t, os, vol, volumeInUseStatus) - devicePath := os.GetDevicePath(diskId) + devicePath := os.GetDevicePath(diskID) if diskPathRegexp.FindString(devicePath) == "" { t.Fatalf("GetDevicePath returned and unexpected path for Cinder volume %s, returned %s", vol, devicePath) } @@ -654,10 +651,10 @@ func TestToAuth3Options(t *testing.T) { cfg := Config{} cfg.Global.Username = "user" cfg.Global.Password = "pass" - cfg.Global.DomainId = "2a73b8f597c04551a0fdc8e95544be8a" + cfg.Global.DomainID = "2a73b8f597c04551a0fdc8e95544be8a" cfg.Global.DomainName = "local" - cfg.Global.AuthUrl = "http://auth.url" - cfg.Global.UserId = "user" + cfg.Global.AuthURL = "http://auth.url" + cfg.Global.UserID = "user" ao := cfg.toAuth3Options() @@ -670,14 +667,14 @@ func TestToAuth3Options(t *testing.T) { if ao.Password != cfg.Global.Password { t.Errorf("Password %s != %s", ao.Password, cfg.Global.Password) } - if ao.DomainID != cfg.Global.DomainId { - t.Errorf("DomainID %s != %s", ao.DomainID, cfg.Global.DomainId) + if ao.DomainID != cfg.Global.DomainID { + t.Errorf("DomainID %s != 
%s", ao.DomainID, cfg.Global.DomainID) } - if ao.IdentityEndpoint != cfg.Global.AuthUrl { - t.Errorf("IdentityEndpoint %s != %s", ao.IdentityEndpoint, cfg.Global.AuthUrl) + if ao.IdentityEndpoint != cfg.Global.AuthURL { + t.Errorf("IdentityEndpoint %s != %s", ao.IdentityEndpoint, cfg.Global.AuthURL) } - if ao.UserID != cfg.Global.UserId { - t.Errorf("UserID %s != %s", ao.UserID, cfg.Global.UserId) + if ao.UserID != cfg.Global.UserID { + t.Errorf("UserID %s != %s", ao.UserID, cfg.Global.UserID) } if ao.DomainName != cfg.Global.DomainName { t.Errorf("DomainName %s != %s", ao.DomainName, cfg.Global.DomainName) diff --git a/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/pkg/cloudprovider/providers/openstack/openstack_volumes.go index db3f288a886..80139046f29 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -45,36 +45,37 @@ import ( ) type volumeService interface { - createVolume(opts VolumeCreateOpts) (string, string, error) + createVolume(opts volumeCreateOpts) (string, string, error) getVolume(volumeID string) (Volume, error) deleteVolume(volumeName string) error expandVolume(volumeID string, newSize int) error } -// Volumes implementation for v1 +// VolumesV1 is a Volumes implementation for cinder v1 type VolumesV1 struct { blockstorage *gophercloud.ServiceClient opts BlockStorageOpts } -// Volumes implementation for v2 +// VolumesV2 is a Volumes implementation for cinder v2 type VolumesV2 struct { blockstorage *gophercloud.ServiceClient opts BlockStorageOpts } -// Volumes implementation for v3 +// VolumesV3 is a Volumes implementation for cinder v3 type VolumesV3 struct { blockstorage *gophercloud.ServiceClient opts BlockStorageOpts } +// Volume stores information about a single volume type Volume struct { // ID of the instance, to which this volume is attached. "" if not attached - AttachedServerId string + AttachedServerID string // Device file path AttachedDevice string - // AvailabilityZone is which availability zone the volume is in + // availabilityZone is which availability zone the volume is in AvailabilityZone string // Unique identifier for the volume. ID string @@ -86,7 +87,7 @@ type Volume struct { Size int } -type VolumeCreateOpts struct { +type volumeCreateOpts struct { Size int Availability string Name string @@ -98,21 +99,21 @@ type VolumeCreateOpts struct { var _ cloudprovider.PVLabeler = (*OpenStack)(nil) const ( - VolumeAvailableStatus = "available" - VolumeInUseStatus = "in-use" - VolumeDeletedStatus = "deleted" - VolumeErrorStatus = "error" + volumeAvailableStatus = "available" + volumeInUseStatus = "in-use" + volumeDeletedStatus = "deleted" + volumeErrorStatus = "error" // On some environments, we need to query the metadata service in order // to locate disks. We'll use the Newton version, which includes device // metadata. 
- NewtonMetadataVersion = "2016-06-30" + newtonMetadataVersion = "2016-06-30" ) -func (volumes *VolumesV1) createVolume(opts VolumeCreateOpts) (string, string, error) { +func (volumes *VolumesV1) createVolume(opts volumeCreateOpts) (string, string, error) { startTime := time.Now() - create_opts := volumes_v1.CreateOpts{ + createOpts := volumes_v1.CreateOpts{ Name: opts.Name, Size: opts.Size, VolumeType: opts.VolumeType, @@ -120,7 +121,7 @@ func (volumes *VolumesV1) createVolume(opts VolumeCreateOpts) (string, string, e Metadata: opts.Metadata, } - vol, err := volumes_v1.Create(volumes.blockstorage, create_opts).Extract() + vol, err := volumes_v1.Create(volumes.blockstorage, createOpts).Extract() timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("create_v1_volume", timeTaken, err) if err != nil { @@ -129,10 +130,10 @@ func (volumes *VolumesV1) createVolume(opts VolumeCreateOpts) (string, string, e return vol.ID, vol.AvailabilityZone, nil } -func (volumes *VolumesV2) createVolume(opts VolumeCreateOpts) (string, string, error) { +func (volumes *VolumesV2) createVolume(opts volumeCreateOpts) (string, string, error) { startTime := time.Now() - create_opts := volumes_v2.CreateOpts{ + createOpts := volumes_v2.CreateOpts{ Name: opts.Name, Size: opts.Size, VolumeType: opts.VolumeType, @@ -140,7 +141,7 @@ func (volumes *VolumesV2) createVolume(opts VolumeCreateOpts) (string, string, e Metadata: opts.Metadata, } - vol, err := volumes_v2.Create(volumes.blockstorage, create_opts).Extract() + vol, err := volumes_v2.Create(volumes.blockstorage, createOpts).Extract() timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("create_v2_volume", timeTaken, err) if err != nil { @@ -149,10 +150,10 @@ func (volumes *VolumesV2) createVolume(opts VolumeCreateOpts) (string, string, e return vol.ID, vol.AvailabilityZone, nil } -func (volumes *VolumesV3) createVolume(opts VolumeCreateOpts) (string, string, error) { +func (volumes *VolumesV3) createVolume(opts volumeCreateOpts) (string, string, error) { startTime := time.Now() - create_opts := volumes_v3.CreateOpts{ + createOpts := volumes_v3.CreateOpts{ Name: opts.Name, Size: opts.Size, VolumeType: opts.VolumeType, @@ -160,7 +161,7 @@ func (volumes *VolumesV3) createVolume(opts VolumeCreateOpts) (string, string, e Metadata: opts.Metadata, } - vol, err := volumes_v3.Create(volumes.blockstorage, create_opts).Extract() + vol, err := volumes_v3.Create(volumes.blockstorage, createOpts).Extract() timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("create_v3_volume", timeTaken, err) if err != nil { @@ -187,7 +188,7 @@ func (volumes *VolumesV1) getVolume(volumeID string) (Volume, error) { } if len(volumeV1.Attachments) > 0 && volumeV1.Attachments[0]["server_id"] != nil { - volume.AttachedServerId = volumeV1.Attachments[0]["server_id"].(string) + volume.AttachedServerID = volumeV1.Attachments[0]["server_id"].(string) volume.AttachedDevice = volumeV1.Attachments[0]["device"].(string) } @@ -212,7 +213,7 @@ func (volumes *VolumesV2) getVolume(volumeID string) (Volume, error) { } if len(volumeV2.Attachments) > 0 { - volume.AttachedServerId = volumeV2.Attachments[0].ServerID + volume.AttachedServerID = volumeV2.Attachments[0].ServerID volume.AttachedDevice = volumeV2.Attachments[0].Device } @@ -236,7 +237,7 @@ func (volumes *VolumesV3) getVolume(volumeID string) (Volume, error) { } if len(volumeV3.Attachments) > 0 { - volume.AttachedServerId = volumeV3.Attachments[0].ServerID + volume.AttachedServerID = 
volumeV3.Attachments[0].ServerID volume.AttachedDevice = volumeV3.Attachments[0].Device } @@ -269,10 +270,10 @@ func (volumes *VolumesV3) deleteVolume(volumeID string) error { func (volumes *VolumesV1) expandVolume(volumeID string, newSize int) error { startTime := time.Now() - create_opts := volumeexpand.ExtendSizeOpts{ + createOpts := volumeexpand.ExtendSizeOpts{ NewSize: newSize, } - err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, create_opts).ExtractErr() + err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, createOpts).ExtractErr() timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("expand_volume", timeTaken, err) return err @@ -280,10 +281,10 @@ func (volumes *VolumesV1) expandVolume(volumeID string, newSize int) error { func (volumes *VolumesV2) expandVolume(volumeID string, newSize int) error { startTime := time.Now() - create_opts := volumeexpand.ExtendSizeOpts{ + createOpts := volumeexpand.ExtendSizeOpts{ NewSize: newSize, } - err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, create_opts).ExtractErr() + err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, createOpts).ExtractErr() timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("expand_volume", timeTaken, err) return err @@ -291,25 +292,26 @@ func (volumes *VolumesV2) expandVolume(volumeID string, newSize int) error { func (volumes *VolumesV3) expandVolume(volumeID string, newSize int) error { startTime := time.Now() - create_opts := volumeexpand.ExtendSizeOpts{ + createOpts := volumeexpand.ExtendSizeOpts{ NewSize: newSize, } - err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, create_opts).ExtractErr() + err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, createOpts).ExtractErr() timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("expand_volume", timeTaken, err) return err } +// OperationPending checks if there is an operation pending on a volume func (os *OpenStack) OperationPending(diskName string) (bool, string, error) { volume, err := os.getVolume(diskName) if err != nil { return false, "", err } volumeStatus := volume.Status - if volumeStatus == VolumeErrorStatus { + if volumeStatus == volumeErrorStatus { return false, volumeStatus, nil } - if volumeStatus == VolumeAvailableStatus || volumeStatus == VolumeInUseStatus || volumeStatus == VolumeDeletedStatus { + if volumeStatus == volumeAvailableStatus || volumeStatus == volumeInUseStatus || volumeStatus == volumeDeletedStatus { return false, volume.Status, nil } return true, volumeStatus, nil @@ -327,13 +329,13 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { return "", err } - if volume.AttachedServerId != "" { - if instanceID == volume.AttachedServerId { + if volume.AttachedServerID != "" { + if instanceID == volume.AttachedServerID { glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) return volume.ID, nil } - nodeName, err := os.GetNodeNameByID(volume.AttachedServerId) - attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerId) + nodeName, err := os.GetNodeNameByID(volume.AttachedServerID) + attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerID) if err != nil { glog.Error(attachErr) return "", errors.New(attachErr) @@ -365,34 +367,34 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID 
@@ -365,34 +367,34 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID string) error {
 	if err != nil {
 		return err
 	}
-	if volume.Status == VolumeAvailableStatus {
+	if volume.Status == volumeAvailableStatus {
 		// "available" is fine since that means the volume is detached from instance already.
 		glog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID)
 		return nil
 	}
 
-	if volume.Status != VolumeInUseStatus {
+	if volume.Status != volumeInUseStatus {
 		return fmt.Errorf("can not detach volume %s, its status is %s", volume.Name, volume.Status)
 	}
 	cClient, err := os.NewComputeV2()
 	if err != nil {
 		return err
 	}
-	if volume.AttachedServerId != instanceID {
+	if volume.AttachedServerID != instanceID {
 		return fmt.Errorf("disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID)
-	} else {
-		startTime := time.Now()
-		// This is a blocking call and effects kubelet's performance directly.
-		// We should consider kicking it out into a separate routine, if it is bad.
-		err = volumeattach.Delete(cClient, instanceID, volume.ID).ExtractErr()
-		timeTaken := time.Since(startTime).Seconds()
-		recordOpenstackOperationMetric("detach_disk", timeTaken, err)
-		if err != nil {
-			return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err)
-		}
-		glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID)
 	}
+	startTime := time.Now()
+	// This is a blocking call and affects kubelet's performance directly.
+	// We should consider kicking it out into a separate routine, if it is bad.
+	err = volumeattach.Delete(cClient, instanceID, volume.ID).ExtractErr()
+	timeTaken := time.Since(startTime).Seconds()
+	recordOpenstackOperationMetric("detach_disk", timeTaken, err)
+	if err != nil {
+		return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err)
+	}
+	glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID)
+
 	return nil
 }
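Aside: the comment kept above ("We should consider kicking it out into a separate routine") remains an open TODO. One minimal shape it could take, purely as a sketch, is to push the blocking call onto a goroutine and hand back a channel; detachFn here is a hypothetical closure over cClient, instanceID and volume.ID wrapping volumeattach.Delete(...).ExtractErr():

package sketch

// detachAsync runs a blocking detach on its own goroutine and reports the
// result on a buffered channel, so the sender never blocks.
func detachAsync(detachFn func() error) <-chan error {
	result := make(chan error, 1)
	go func() {
		result <- detachFn()
	}()
	return result
}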
@@ -402,7 +404,7 @@ func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, ne
 	if err != nil {
 		return oldSize, err
 	}
-	if volume.Status != VolumeAvailableStatus {
+	if volume.Status != volumeAvailableStatus {
 		// cinder volume can not be expanded if its status is not available
 		return oldSize, fmt.Errorf("volume status is not available")
 	}
@@ -445,7 +447,7 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str
 		return "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
 	}
 
-	opts := VolumeCreateOpts{
+	opts := volumeCreateOpts{
 		Name:       name,
 		Size:       size,
 		VolumeType: vtype,
@@ -465,8 +467,8 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str
 	return volumeID, volumeAZ, os.bsOpts.IgnoreVolumeAZ, nil
 }
 
-// GetDevicePath returns the path of an attached block storage volume, specified by its id.
-func (os *OpenStack) GetDevicePathBySerialId(volumeID string) string {
+// GetDevicePathBySerialID returns the path of an attached block storage volume, specified by its id.
+func (os *OpenStack) GetDevicePathBySerialID(volumeID string) string {
 	// Build a list of candidate device paths.
 	// Certain Nova drivers will set the disk serial ID, including the Cinder volume id.
 	candidateDeviceNodes := []string{
@@ -493,7 +495,7 @@ func (os *OpenStack) GetDevicePathBySerialId(volumeID string) string {
 	return ""
 }
 
-func (os *OpenStack) GetDevicePathFromInstanceMetadata(volumeID string) string {
+func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string {
 	// Nova Hyper-V hosts cannot override disk SCSI IDs. In order to locate
 	// volumes, we're querying the metadata service. Note that the Hyper-V
 	// driver will include device metadata for untagged volumes as well.
@@ -501,7 +503,7 @@ func (os *OpenStack) GetDevicePathFromInstanceMetadata(volumeID string) string {
 	// We're avoiding using cached metadata (or the configdrive),
 	// relying on the metadata service.
 	instanceMetadata, err := getMetadataFromMetadataService(
-		NewtonMetadataVersion)
+		newtonMetadataVersion)
 
 	if err != nil {
 		glog.V(4).Infof(
@@ -544,10 +546,10 @@ func (os *OpenStack) GetDevicePathFromInstanceMetadata(volumeID string) string {
 
 // GetDevicePath returns the path of an attached block storage volume, specified by its id.
 func (os *OpenStack) GetDevicePath(volumeID string) string {
-	devicePath := os.GetDevicePathBySerialId(volumeID)
+	devicePath := os.GetDevicePathBySerialID(volumeID)
 
 	if devicePath == "" {
-		devicePath = os.GetDevicePathFromInstanceMetadata(volumeID)
+		devicePath = os.getDevicePathFromInstanceMetadata(volumeID)
 	}
 
 	if devicePath == "" {
@@ -557,6 +559,7 @@ func (os *OpenStack) GetDevicePath(volumeID string) string {
 	return devicePath
 }
 
+// DeleteVolume deletes a volume given volume name.
 func (os *OpenStack) DeleteVolume(volumeID string) error {
 	used, err := os.diskIsUsed(volumeID)
 	if err != nil {
@@ -585,17 +588,16 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string,
 	if err != nil {
 		return "", err
 	}
-	if volume.Status != VolumeInUseStatus {
+	if volume.Status != volumeInUseStatus {
 		return "", fmt.Errorf("can not get device path of volume %s, its status is %s ", volume.Name, volume.Status)
 	}
-	if volume.AttachedServerId != "" {
-		if instanceID == volume.AttachedServerId {
+	if volume.AttachedServerID != "" {
+		if instanceID == volume.AttachedServerID {
 			// Attachment[0]["device"] points to the device path
 			// see http://developer.openstack.org/api-ref-blockstorage-v1.html
 			return volume.AttachedDevice, nil
-		} else {
-			return "", fmt.Errorf("disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerId)
 		}
+		return "", fmt.Errorf("disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerID)
 	}
 	return "", fmt.Errorf("volume %s has no ServerId", volumeID)
 }
@@ -610,7 +612,7 @@ func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) {
 		return false, err
 	}
 
-	return instanceID == volume.AttachedServerId, nil
+	return instanceID == volume.AttachedServerID, nil
 }
 
 // DiskIsAttachedByName queries if a volume is attached to a compute instance by name
@@ -624,9 +626,8 @@ func (os *OpenStack) DiskIsAttachedByName(nodeName types.NodeName, volumeID stri
 		if err == ErrNotFound {
 			// instance not found anymore in cloudprovider, assume that cinder is detached
 			return false, "", nil
-		} else {
-			return false, "", err
 		}
+		return false, "", err
 	}
 	instanceID := "/" + srv.ID
 	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
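Aside: the probe in GetDevicePathBySerialID boils down to "stat each candidate /dev path built from the volume ID and return the first hit". A stripped-down sketch; the two candidate formats below are assumptions for illustration, the authoritative list lives in the function above:

package sketch

import "os"

// firstExistingPath mirrors the shape of the candidate-probe loop in
// GetDevicePathBySerialID.
func firstExistingPath(volumeID string) string {
	candidates := []string{
		"/dev/disk/by-id/virtio-" + volumeID,
		"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_" + volumeID,
	}
	for _, f := range candidates {
		if _, err := os.Stat(f); err == nil {
			return f
		}
	}
	return ""
}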
@@ -665,9 +666,8 @@ func (os *OpenStack) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs [
 				attached[volumeID] = false
 			}
 			return attached, nil
-		} else {
-			return attached, err
 		}
+		return attached, err
 	}
 	instanceID := "/" + srv.ID
 	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
@@ -682,7 +682,7 @@ func (os *OpenStack) diskIsUsed(volumeID string) (bool, error) {
 	if err != nil {
 		return false, err
 	}
-	return volume.AttachedServerId != "", nil
+	return volume.AttachedServerID != "", nil
 }
 
 // ShouldTrustDevicePath queries if we should trust the cinder provide deviceName, See issue #33128
@@ -715,8 +715,8 @@ func (os *OpenStack) GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]str
 // recordOpenstackOperationMetric records openstack operation metrics
 func recordOpenstackOperationMetric(operation string, timeTaken float64, err error) {
 	if err != nil {
-		OpenstackApiRequestErrors.With(prometheus.Labels{"request": operation}).Inc()
+		openstackAPIRequestErrors.With(prometheus.Labels{"request": operation}).Inc()
 	} else {
-		OpenstackOperationsLatency.With(prometheus.Labels{"request": operation}).Observe(timeTaken)
+		openstackOperationsLatency.With(prometheus.Labels{"request": operation}).Observe(timeTaken)
 	}
 }
diff --git a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go
index ecda5536c5e..16edc3ab4a7 100644
--- a/pkg/volume/cinder/attacher.go
+++ b/pkg/volume/cinder/attacher.go
@@ -35,7 +35,7 @@ import (
 
 type cinderDiskAttacher struct {
 	host           volume.VolumeHost
-	cinderProvider CinderProvider
+	cinderProvider BlockStorageProvider
 }
 
 var _ volume.Attacher = &cinderDiskAttacher{}
@@ -215,7 +215,7 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
 	volumeID := volumeSource.VolumeID
 
 	if devicePath == "" {
-		return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty.", volumeID)
+		return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty", volumeID)
 	}
 
 	ticker := time.NewTicker(probeVolumeInitDelay)
@@ -237,16 +237,15 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
 			if exists && err == nil {
 				glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
 				return devicePath, nil
-			} else {
-				// Log an error, and continue checking periodically
-				glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
-				// Using exponential backoff instead of linear
-				ticker.Stop()
-				duration = time.Duration(float64(duration) * probeVolumeFactor)
-				ticker = time.NewTicker(duration)
 			}
+			// Log an error, and continue checking periodically
+			glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
+			// Using exponential backoff instead of linear
+			ticker.Stop()
+			duration = time.Duration(float64(duration) * probeVolumeFactor)
+			ticker = time.NewTicker(duration)
 		case <-timer.C:
-			return "", fmt.Errorf("Could not find attached Cinder disk %q. Timeout waiting for mount paths to be created.", volumeID)
+			return "", fmt.Errorf("could not find attached Cinder disk %q. Timeout waiting for mount paths to be created", volumeID)
 		}
 	}
 }
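Aside: the WaitForAttach change above keeps the exponential backoff intact: on each failed probe the ticker is stopped and restarted with a longer period. The pattern in isolation, with illustrative constants standing in for the package's probeVolumeInitDelay and probeVolumeFactor:

package main

import (
	"fmt"
	"time"
)

// Illustrative stand-ins for the cinder package's probe constants.
const (
	probeVolumeInitDelay = 1 * time.Second
	probeVolumeFactor    = 2.0
)

func main() {
	duration := probeVolumeInitDelay
	ticker := time.NewTicker(duration)
	timer := time.NewTimer(10 * time.Second)

	for {
		select {
		case <-ticker.C:
			// A failed probe widens the interval: stop the current ticker and
			// restart it with a longer period, as WaitForAttach does above.
			ticker.Stop()
			duration = time.Duration(float64(duration) * probeVolumeFactor)
			ticker = time.NewTicker(duration)
			fmt.Println("probe failed, backing off; next interval:", duration)
		case <-timer.C:
			fmt.Println("timed out waiting for device")
			return
		}
	}
}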
@@ -299,7 +298,7 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
 
 type cinderDiskDetacher struct {
 	mounter        mount.Interface
-	cinderProvider CinderProvider
+	cinderProvider BlockStorageProvider
 }
 
 var _ volume.Detacher = &cinderDiskDetacher{}
diff --git a/pkg/volume/cinder/attacher_test.go b/pkg/volume/cinder/attacher_test.go
index ddc307cd5fe..adaadf6e466 100644
--- a/pkg/volume/cinder/attacher_test.go
+++ b/pkg/volume/cinder/attacher_test.go
@@ -453,18 +453,18 @@ func (testcase *testcase) AttachDisk(instanceID, volumeID string) (string, error
 	if expected.volumeID == "" && expected.instanceID == "" {
 		// testcase.attach looks uninitialized, test did not expect to call
 		// AttachDisk
-		testcase.t.Errorf("Unexpected AttachDisk call!")
-		return "", errors.New("Unexpected AttachDisk call!")
+		testcase.t.Errorf("unexpected AttachDisk call")
+		return "", errors.New("unexpected AttachDisk call")
 	}
 
 	if expected.volumeID != volumeID {
-		testcase.t.Errorf("Unexpected AttachDisk call: expected volumeID %s, got %s", expected.volumeID, volumeID)
-		return "", errors.New("Unexpected AttachDisk call: wrong volumeID")
+		testcase.t.Errorf("unexpected AttachDisk call: expected volumeID %s, got %s", expected.volumeID, volumeID)
+		return "", errors.New("unexpected AttachDisk call: wrong volumeID")
 	}
 
 	if expected.instanceID != instanceID {
-		testcase.t.Errorf("Unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
-		return "", errors.New("Unexpected AttachDisk call: wrong instanceID")
+		testcase.t.Errorf("unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
+		return "", errors.New("unexpected AttachDisk call: wrong instanceID")
 	}
 
 	glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret)
@@ -479,18 +479,18 @@ func (testcase *testcase) DetachDisk(instanceID, volumeID string) error {
 	if expected.devicePath == "" && expected.instanceID == "" {
 		// testcase.detach looks uninitialized, test did not expect to call
 		// DetachDisk
-		testcase.t.Errorf("Unexpected DetachDisk call!")
-		return errors.New("Unexpected DetachDisk call!")
+		testcase.t.Errorf("unexpected DetachDisk call")
+		return errors.New("unexpected DetachDisk call")
 	}
 
 	if expected.devicePath != volumeID {
-		testcase.t.Errorf("Unexpected DetachDisk call: expected volumeID %s, got %s", expected.devicePath, volumeID)
-		return errors.New("Unexpected DetachDisk call: wrong volumeID")
+		testcase.t.Errorf("unexpected DetachDisk call: expected volumeID %s, got %s", expected.devicePath, volumeID)
+		return errors.New("unexpected DetachDisk call: wrong volumeID")
 	}
 
 	if expected.instanceID != instanceID {
-		testcase.t.Errorf("Unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
-		return errors.New("Unexpected DetachDisk call: wrong instanceID")
+		testcase.t.Errorf("unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
+		return errors.New("unexpected DetachDisk call: wrong instanceID")
 	}
 
 	glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret)
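Aside: all of these test-fake hunks apply the same golint rule: error strings should start lowercase and carry no trailing punctuation, so they read naturally when wrapped with a "context: %v" prefix. In miniature:

package sketch

import (
	"errors"
	"fmt"
)

var (
	errBad  = errors.New("Unexpected AttachDisk call!") // flagged: capitalized, ends with punctuation
	errGood = errors.New("unexpected AttachDisk call")  // passes golint
)

// Wrapped, the lowercase form composes cleanly.
var errWrapped = fmt.Errorf("attach failed: %v", errGood)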
DiskIsAttached call!") - return false, errors.New("Unexpected DiskIsAttached call!") + testcase.t.Errorf("unexpected DiskIsAttached call") + return false, errors.New("unexpected DiskIsAttached call") } if expected.volumeID != volumeID { - testcase.t.Errorf("Unexpected DiskIsAttached call: expected volumeID %s, got %s", expected.volumeID, volumeID) - return false, errors.New("Unexpected DiskIsAttached call: wrong volumeID") + testcase.t.Errorf("unexpected DiskIsAttached call: expected volumeID %s, got %s", expected.volumeID, volumeID) + return false, errors.New("unexpected DiskIsAttached call: wrong volumeID") } if expected.instanceID != instanceID { - testcase.t.Errorf("Unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID) - return false, errors.New("Unexpected DiskIsAttached call: wrong instanceID") + testcase.t.Errorf("unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID) + return false, errors.New("unexpected DiskIsAttached call: wrong instanceID") } glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret) @@ -551,18 +551,18 @@ func (testcase *testcase) GetAttachmentDiskPath(instanceID, volumeID string) (st if expected.volumeID == "" && expected.instanceID == "" { // testcase.diskPath looks uninitialized, test did not expect to // call GetAttachmentDiskPath - testcase.t.Errorf("Unexpected GetAttachmentDiskPath call!") - return "", errors.New("Unexpected GetAttachmentDiskPath call!") + testcase.t.Errorf("unexpected GetAttachmentDiskPath call") + return "", errors.New("unexpected GetAttachmentDiskPath call") } if expected.volumeID != volumeID { - testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected volumeID %s, got %s", expected.volumeID, volumeID) - return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong volumeID") + testcase.t.Errorf("unexpected GetAttachmentDiskPath call: expected volumeID %s, got %s", expected.volumeID, volumeID) + return "", errors.New("unexpected GetAttachmentDiskPath call: wrong volumeID") } if expected.instanceID != instanceID { - testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected instanceID %s, got %s", expected.instanceID, instanceID) - return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong instanceID") + testcase.t.Errorf("unexpected GetAttachmentDiskPath call: expected instanceID %s, got %s", expected.instanceID, instanceID) + return "", errors.New("unexpected GetAttachmentDiskPath call: wrong instanceID") } glog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret) @@ -588,25 +588,25 @@ func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID } if expected.nodeName != nodeName { - testcase.t.Errorf("Unexpected DiskIsAttachedByName call: expected nodename %s, got %s", expected.nodeName, nodeName) - return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call: wrong nodename") + testcase.t.Errorf("unexpected DiskIsAttachedByName call: expected nodename %s, got %s", expected.nodeName, nodeName) + return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong nodename") } if expected.volumeID == "" && expected.instanceID == "" { // testcase.diskIsAttached looks uninitialized, test did not expect to // call DiskIsAttached - testcase.t.Errorf("Unexpected DiskIsAttachedByName call!") - return false, instanceID, errors.New("Unexpected 
DiskIsAttachedByName call!") + testcase.t.Errorf("unexpected DiskIsAttachedByName call") + return false, instanceID, errors.New("unexpected DiskIsAttachedByName call") } if expected.volumeID != volumeID { - testcase.t.Errorf("Unexpected DiskIsAttachedByName call: expected volumeID %s, got %s", expected.volumeID, volumeID) - return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call: wrong volumeID") + testcase.t.Errorf("unexpected DiskIsAttachedByName call: expected volumeID %s, got %s", expected.volumeID, volumeID) + return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong volumeID") } if expected.instanceID != instanceID { - testcase.t.Errorf("Unexpected DiskIsAttachedByName call: expected instanceID %s, got %s", expected.instanceID, instanceID) - return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call: wrong instanceID") + testcase.t.Errorf("unexpected DiskIsAttachedByName call: expected instanceID %s, got %s", expected.instanceID, instanceID) + return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong instanceID") } glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret) diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go index f0255d5f4f2..fa3fd95b03e 100644 --- a/pkg/volume/cinder/cinder.go +++ b/pkg/volume/cinder/cinder.go @@ -38,15 +38,17 @@ import ( ) const ( + // DefaultCloudConfigPath is the default path for cloud configuration DefaultCloudConfigPath = "/etc/kubernetes/cloud-config" ) -// This is the primary entrypoint for volume plugins. +// ProbeVolumePlugins is the primary entrypoint for volume plugins. func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&cinderPlugin{}} } -type CinderProvider interface { +// BlockStorageProvider is the interface for accessing cinder functionality. 
@@ -120,7 +122,7 @@ func (plugin *cinderPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
 }
 
 func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
-	return plugin.newMounterInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
+	return plugin.newMounterInternal(spec, pod.UID, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
 }
 
 func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
@@ -147,7 +149,7 @@ func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
 }
 
 func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
-	return plugin.newUnmounterInternal(volName, podUID, &CinderDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
+	return plugin.newUnmounterInternal(volName, podUID, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
 }
 
 func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Unmounter, error) {
@@ -162,7 +164,7 @@ func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UI
 }
 
 func (plugin *cinderPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
-	return plugin.newDeleterInternal(spec, &CinderDiskUtil{})
+	return plugin.newDeleterInternal(spec, &DiskUtil{})
 }
 
 func (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdManager) (volume.Deleter, error) {
@@ -179,7 +181,7 @@ func (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdMana
 }
 
 func (plugin *cinderPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
-	return plugin.newProvisionerInternal(options, &CinderDiskUtil{})
+	return plugin.newProvisionerInternal(options, &DiskUtil{})
 }
 
 func (plugin *cinderPlugin) newProvisionerInternal(options volume.VolumeOptions, manager cdManager) (volume.Provisioner, error) {
@@ -192,23 +194,22 @@ func (plugin *cinderPlugin) newProvisionerInternal(options volume.VolumeOptions,
 	}, nil
 }
 
-func (plugin *cinderPlugin) getCloudProvider() (CinderProvider, error) {
+func (plugin *cinderPlugin) getCloudProvider() (BlockStorageProvider, error) {
 	cloud := plugin.host.GetCloudProvider()
 	if cloud == nil {
 		if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
 			var config *os.File
 			config, err = os.Open(DefaultCloudConfigPath)
 			if err != nil {
-				return nil, errors.New(fmt.Sprintf("unable to load OpenStack configuration from default path : %v", err))
-			} else {
-				defer config.Close()
-				cloud, err = cloudprovider.GetCloudProvider(openstack.ProviderName, config)
-				if err != nil {
-					return nil, errors.New(fmt.Sprintf("unable to create OpenStack cloud provider from default path : %v", err))
-				}
+				return nil, fmt.Errorf("unable to load OpenStack configuration from default path : %v", err)
+			}
+			defer config.Close()
+			cloud, err = cloudprovider.GetCloudProvider(openstack.ProviderName, config)
+			if err != nil {
+				return nil, fmt.Errorf("unable to create OpenStack cloud provider from default path : %v", err)
 			}
 		} else {
-			return nil, errors.New(fmt.Sprintf("OpenStack cloud provider was not initialized properly : %v", err))
+			return nil, fmt.Errorf("OpenStack cloud provider was not initialized properly : %v", err)
 		}
 	}
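Aside: besides the de-nesting, the getCloudProvider hunk above swaps errors.New(fmt.Sprintf(...)) for fmt.Errorf(...), which is the form golint asks for; both construct the same error value:

package sketch

import (
	"errors"
	"fmt"
)

// openErrOld is the pattern golint flags ("should replace errors.New(fmt.Sprintf(...)) with fmt.Errorf(...)").
func openErrOld(err error) error {
	return errors.New(fmt.Sprintf("unable to load OpenStack configuration from default path : %v", err))
}

// openErrNew is the equivalent, lint-clean form used in the hunk above.
func openErrNew(err error) error {
	return fmt.Errorf("unable to load OpenStack configuration from default path : %v", err)
}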
diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go
index d994dff0b1a..323690fa8cc 100644
--- a/pkg/volume/cinder/cinder_util.go
+++ b/pkg/volume/cinder/cinder_util.go
@@ -35,11 +35,12 @@ import (
 	"k8s.io/utils/exec"
 )
 
-type CinderDiskUtil struct{}
+// DiskUtil has utility/helper methods
+type DiskUtil struct{}
 
-// Attaches a disk specified by a volume.CinderPersistenDisk to the current kubelet.
+// AttachDisk attaches a disk specified by a volume.CinderPersistentDisk to the current kubelet.
 // Mounts the disk to its global path.
-func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
+func (util *DiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
 	options := []string{}
 	if b.readOnly {
 		options = append(options, "ro")
@@ -98,8 +99,8 @@ func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath stri
 	return nil
 }
 
-// Unmounts the device and detaches the disk from the kubelet's host machine.
-func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
+// DetachDisk unmounts the device and detaches the disk from the kubelet's host machine.
+func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
 	globalPDPath := makeGlobalPDName(cd.plugin.host, cd.pdName)
 	if err := cd.mounter.Unmount(globalPDPath); err != nil {
 		return err
@@ -124,7 +125,8 @@ func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
 	return nil
 }
 
-func (util *CinderDiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error {
+// DeleteVolume uses the cloud entrypoint to delete specified volume
+func (util *DiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error {
 	cloud, err := cd.plugin.getCloudProvider()
 	if err != nil {
 		return err
@@ -158,7 +160,8 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
 	return zones, nil
 }
 
-func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
+// CreateVolume uses the cloud provider entrypoint for creating a volume
+func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
 	cloud, err := c.plugin.getCloudProvider()
 	if err != nil {
 		return "", 0, nil, "", err
@@ -247,10 +250,10 @@ func probeAttachedVolume() error {
 }
 
 func scsiHostRescan() {
-	scsi_path := "/sys/class/scsi_host/"
-	if dirs, err := ioutil.ReadDir(scsi_path); err == nil {
+	scsiPath := "/sys/class/scsi_host/"
+	if dirs, err := ioutil.ReadDir(scsiPath); err == nil {
 		for _, f := range dirs {
-			name := scsi_path + f.Name() + "/scan"
+			name := scsiPath + f.Name() + "/scan"
 			data := []byte("- - -")
 			ioutil.WriteFile(name, data, 0666)
 		}