Merge pull request #87811 from mborsz/pv

Remove unnecessary calls to GCE API after PD is created

commit c69c91987b
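In short: CreateDisk and CreateRegionalDisk now return the in-memory *Disk they just described to the API, so callers can label the resulting PersistentVolume without issuing an extra GET to GCE. A minimal sketch of the resulting call pattern, assuming the updated Disks interface from the hunks below (createAndLabel itself is a hypothetical helper, not part of this PR):

package main

import (
	gce "k8s.io/legacy-cloud-providers/gce"
)

// createAndLabel provisions a zonal PD and returns the PV labels for it.
func createAndLabel(cloud gce.Disks, name, diskType, zone string, sizeGb int64, tags map[string]string) (map[string]string, error) {
	// One call creates the disk and hands back its description.
	disk, err := cloud.CreateDisk(name, diskType, zone, sizeGb, tags)
	if err != nil {
		return nil, err
	}
	// Before this PR the caller had to run GetAutoLabelsForPD(name, "" /* zone */),
	// which re-fetched the disk from the GCE API; now the handle is reused.
	return cloud.GetAutoLabelsForPD(disk)
}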
@@ -61,6 +61,7 @@ go_test(
         "//staging/src/k8s.io/cloud-provider:go_default_library",
         "//staging/src/k8s.io/cloud-provider/volume:go_default_library",
         "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
+        "//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/mount:go_default_library",
     ],
@@ -30,6 +30,7 @@ import (
 	cloudvolume "k8s.io/cloud-provider/volume"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
+	"k8s.io/legacy-cloud-providers/gce"
 
 	"strings"
@@ -596,19 +597,19 @@ func (testcase *testcase) BulkDisksAreAttached(diskByNodes map[types.NodeName][]
 	return verifiedDisksByNodes, nil
 }
 
-func (testcase *testcase) CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error {
-	return errors.New("Not implemented")
+func (testcase *testcase) CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) (*gce.Disk, error) {
+	return nil, errors.New("Not implemented")
 }
 
-func (testcase *testcase) CreateRegionalDisk(name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) error {
-	return errors.New("Not implemented")
+func (testcase *testcase) CreateRegionalDisk(name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) (*gce.Disk, error) {
+	return nil, errors.New("Not implemented")
 }
 
 func (testcase *testcase) DeleteDisk(diskToDelete string) error {
 	return errors.New("Not implemented")
 }
 
-func (testcase *testcase) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) {
+func (testcase *testcase) GetAutoLabelsForPD(*gce.Disk) (map[string]string, error) {
 	return map[string]string{}, errors.New("Not implemented")
 }
@@ -147,6 +147,7 @@ func (util *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.
 		return "", 0, nil, "", err
 	}
 
+	var disk *gcecloud.Disk
 	switch replicationType {
 	case replicationTypeRegionalPD:
 		selectedZones, err := volumehelpers.SelectZonesForVolume(zonePresent, zonesPresent, configuredZone, configuredZones, activezones, node, allowedTopologies, c.options.PVC.Name, maxRegionalPDZones)
@@ -154,12 +155,13 @@ func (util *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.
 			klog.V(2).Infof("Error selecting zones for regional GCE PD volume: %v", err)
 			return "", 0, nil, "", err
 		}
-		if err = cloud.CreateRegionalDisk(
+		disk, err = cloud.CreateRegionalDisk(
 			name,
 			diskType,
 			selectedZones,
 			int64(requestGB),
-			*c.options.CloudTags); err != nil {
+			*c.options.CloudTags)
+		if err != nil {
 			klog.V(2).Infof("Error creating regional GCE PD volume: %v", err)
 			return "", 0, nil, "", err
 		}
@@ -170,12 +172,13 @@ func (util *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.
 		if err != nil {
 			return "", 0, nil, "", err
 		}
-		if err := cloud.CreateDisk(
+		disk, err = cloud.CreateDisk(
 			name,
 			diskType,
 			selectedZone,
 			int64(requestGB),
-			*c.options.CloudTags); err != nil {
+			*c.options.CloudTags)
+		if err != nil {
 			klog.V(2).Infof("Error creating single-zone GCE PD volume: %v", err)
 			return "", 0, nil, "", err
 		}
@@ -185,7 +188,7 @@ func (util *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.
 		return "", 0, nil, "", fmt.Errorf("replication-type of '%s' is not supported", replicationType)
 	}
 
-	labels, err := cloud.GetAutoLabelsForPD(name, "" /* zone */)
+	labels, err := cloud.GetAutoLabelsForPD(disk)
 	if err != nil {
 		// We don't really want to leak the volume here...
 		klog.Errorf("error getting labels for volume %q: %v", name, err)
@@ -61,6 +61,8 @@ const (
 	diskSourceURITemplateRegional = "%s/regions/%s/disks/%s" //{gce.projectID}/regions/{disk.Region}/disks/repd"
 
 	replicaZoneURITemplateSingleZone = "%s/zones/%s" // {gce.projectID}/zones/{disk.Zone}
+
+	diskKind = "compute#disk"
 )
 
 type diskServiceManager interface {
@@ -70,7 +72,7 @@ type diskServiceManager interface {
 		sizeGb int64,
 		tagsStr string,
 		diskType string,
-		zone string) error
+		zone string) (*Disk, error)
 
 	// Creates a new regional persistent disk on GCE with the given disk spec.
 	CreateRegionalDiskOnCloudProvider(
@@ -78,7 +80,7 @@ type diskServiceManager interface {
 		sizeGb int64,
 		tagsStr string,
 		diskType string,
-		zones sets.String) error
+		zones sets.String) (*Disk, error)
 
 	// Deletes the persistent disk from GCE with the given diskName.
 	DeleteDiskOnCloudProvider(zone string, disk string) error
@@ -120,11 +122,11 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider(
 	sizeGb int64,
 	tagsStr string,
 	diskType string,
-	zone string) error {
+	zone string) (*Disk, error) {
 	diskTypeURI, err := manager.getDiskTypeURI(
 		manager.gce.region /* diskRegion */, singleZone{zone}, diskType)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	diskToCreateV1 := &compute.Disk{
@@ -136,7 +138,14 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider(
 
 	ctx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()
-	return manager.gce.c.Disks().Insert(ctx, meta.ZonalKey(name, zone), diskToCreateV1)
+	disk := &Disk{
+		ZoneInfo: singleZone{zone},
+		Region:   manager.gce.region,
+		Kind:     diskKind,
+		Type:     diskTypeURI,
+		SizeGb:   sizeGb,
+	}
+	return disk, manager.gce.c.Disks().Insert(ctx, meta.ZonalKey(name, zone), diskToCreateV1)
 }
 
 func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
@@ -144,12 +153,12 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
 	sizeGb int64,
 	tagsStr string,
 	diskType string,
-	replicaZones sets.String) error {
+	replicaZones sets.String) (*Disk, error) {
 
 	diskTypeURI, err := manager.getDiskTypeURI(
 		manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	fullyQualifiedReplicaZones := []string{}
 	for _, replicaZone := range replicaZones.UnsortedList() {
@@ -167,7 +176,15 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
 
 	ctx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()
-	return manager.gce.c.RegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreate)
+	disk := &Disk{
+		ZoneInfo: multiZone{replicaZones},
+		Region:   manager.gce.region,
+		Name:     name,
+		Kind:     diskKind,
+		Type:     diskTypeURI,
+		SizeGb:   sizeGb,
+	}
+	return disk, manager.gce.c.RegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreate)
 }
 
 func (manager *gceServiceManager) AttachDiskOnCloudProvider(
@@ -432,12 +449,12 @@ type Disks interface {
 
 	// CreateDisk creates a new PD with given properties. Tags are serialized
 	// as JSON into Description field.
-	CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error
+	CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) (*Disk, error)
 
 	// CreateRegionalDisk creates a new Regional Persistent Disk, with the
 	// specified properties, replicated to the specified zones. Tags are
 	// serialized as JSON into Description field.
-	CreateRegionalDisk(name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) error
+	CreateRegionalDisk(name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) (*Disk, error)
 
 	// DeleteDisk deletes PD.
 	DeleteDisk(diskToDelete string) error
@@ -447,9 +464,7 @@ type Disks interface {
 
 	// GetAutoLabelsForPD returns labels to apply to PersistentVolume
 	// representing this PD, namely failure domain and zone.
-	// zone can be provided to specify the zone for the PD,
-	// if empty all managed zones will be searched.
-	GetAutoLabelsForPD(name string, zone string) (map[string]string, error)
+	GetAutoLabelsForPD(disk *Disk) (map[string]string, error)
 }
 
 // Cloud implements Disks.
@@ -499,9 +514,14 @@ func (g *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume)
 	}
 
 	// If the zone is already labeled, honor the hint
+	name := pv.Spec.GCEPersistentDisk.PDName
 	zone := pv.Labels[v1.LabelZoneFailureDomain]
 
-	labels, err := g.GetAutoLabelsForPD(pv.Spec.GCEPersistentDisk.PDName, zone)
+	disk, err := g.getDiskByNameAndOptionalLabelZones(name, zone)
+	if err != nil {
+		return nil, err
+	}
+	labels, err := g.GetAutoLabelsForPD(disk)
 	if err != nil {
 		return nil, err
 	}
@@ -509,6 +529,22 @@ func (g *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume)
 	return labels, nil
 }
 
+// getDiskByNameAndOptionalLabelZones returns a Disk object for a disk (zonal or regional) with the given name and (optional) zone(s) label.
+func (g *Cloud) getDiskByNameAndOptionalLabelZones(name, labelZone string) (*Disk, error) {
+	if labelZone == "" {
+		return g.GetDiskByNameUnknownZone(name)
+	}
+	zoneSet, err := volumehelpers.LabelZonesToSet(labelZone)
+	if err != nil {
+		return nil, err
+	}
+	if len(zoneSet) > 1 {
+		// Regional PD
+		return g.getRegionalDiskByName(name)
+	}
+	return g.getDiskByName(name, labelZone)
+}
+
 // AttachDisk attaches given disk to the node with the specified NodeName.
 // Current instance is used when instanceID is empty string.
 func (g *Cloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error {
@@ -661,45 +697,47 @@ func (g *Cloud) BulkDisksAreAttached(diskByNodes map[types.NodeName][]string) (m
 // size, in the specified zone. It stores specified tags encoded in
 // JSON in Description field.
 func (g *Cloud) CreateDisk(
-	name string, diskType string, zone string, sizeGb int64, tags map[string]string) error {
+	name string, diskType string, zone string, sizeGb int64, tags map[string]string) (*Disk, error) {
 	// Do not allow creation of PDs in zones that do not have nodes. Such PDs
 	// are not currently usable.
 	curZones, err := g.GetAllCurrentZones()
 	if err != nil {
-		return err
+		return nil, err
 	}
 	if !curZones.Has(zone) {
-		return fmt.Errorf("kubernetes does not have a node in zone %q", zone)
+		return nil, fmt.Errorf("kubernetes does not have a node in zone %q", zone)
 	}
 
 	tagsStr, err := g.encodeDiskTags(tags)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	diskType, err = getDiskType(diskType)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	mc := newDiskMetricContextZonal("create", g.region, zone)
 
-	err = g.manager.CreateDiskOnCloudProvider(
+	disk, err := g.manager.CreateDiskOnCloudProvider(
 		name, sizeGb, tagsStr, diskType, zone)
 
 	mc.Observe(err)
-	if isGCEError(err, "alreadyExists") {
-		klog.Warningf("GCE PD %q already exists, reusing", name)
-		return nil
+	if err != nil {
+		if isGCEError(err, "alreadyExists") {
+			klog.Warningf("GCE PD %q already exists, reusing", name)
+			return g.manager.GetDiskFromCloudProvider(zone, name)
+		}
+		return nil, err
 	}
-	return err
+	return disk, nil
 }
 
 // CreateRegionalDisk creates a new Regional Persistent Disk, with the specified
 // name & size, replicated to the specified zones. It stores specified tags
 // encoded in JSON in Description field.
 func (g *Cloud) CreateRegionalDisk(
-	name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) error {
+	name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) (*Disk, error) {
 
 	// Do not allow creation of PDs in zones that do not have nodes. Such PDs
 	// are not currently usable. This functionality should be reverted to checking
@@ -707,33 +745,36 @@ func (g *Cloud) CreateRegionalDisk(
 	// in zones where there are no nodes
 	curZones, err := g.GetAllCurrentZones()
 	if err != nil {
-		return err
+		return nil, err
 	}
 	if !curZones.IsSuperset(replicaZones) {
-		return fmt.Errorf("kubernetes does not have nodes in specified zones: %q. Zones that contain nodes: %q", replicaZones.Difference(curZones), curZones)
+		return nil, fmt.Errorf("kubernetes does not have nodes in specified zones: %q. Zones that contain nodes: %q", replicaZones.Difference(curZones), curZones)
 	}
 
 	tagsStr, err := g.encodeDiskTags(tags)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	diskType, err = getDiskType(diskType)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	mc := newDiskMetricContextRegional("create", g.region)
 
-	err = g.manager.CreateRegionalDiskOnCloudProvider(
+	disk, err := g.manager.CreateRegionalDiskOnCloudProvider(
 		name, sizeGb, tagsStr, diskType, replicaZones)
 
 	mc.Observe(err)
-	if isGCEError(err, "alreadyExists") {
-		klog.Warningf("GCE PD %q already exists, reusing", name)
-		return nil
+	if err != nil {
+		if isGCEError(err, "alreadyExists") {
+			klog.Warningf("GCE PD %q already exists, reusing", name)
+			return g.manager.GetRegionalDiskFromCloudProvider(name)
+		}
+		return nil, err
 	}
-	return err
+	return disk, nil
 }
 
 func getDiskType(diskType string) (string, error) {
@@ -805,48 +846,7 @@ func (g *Cloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, newSi
 // GetAutoLabelsForPD builds the labels that should be automatically added to a PersistentVolume backed by a GCE PD
 // Specifically, this builds FailureDomain (zone) and Region labels.
 // The PersistentVolumeLabel admission controller calls this and adds the labels when a PV is created.
-// If zone is specified, the volume will only be found in the specified zone,
-// otherwise all managed zones will be searched.
-func (g *Cloud) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) {
-	var disk *Disk
-	var err error
-	if zone == "" {
-		// For regional PDs this is fine, but for zonal PDs we would like as far
-		// as possible to avoid this case, because GCE doesn't guarantee that
-		// volumes are uniquely named per region, just per zone. However,
-		// creation of GCE PDs was originally done only by name, so we have to
-		// continue to support that.
-		// However, wherever possible the zone should be passed (and it is
-		// passed for most cases that we can control, e.g. dynamic volume
-		// provisioning).
-		disk, err = g.GetDiskByNameUnknownZone(name)
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		// We could assume the disks exists; we have all the information we need
-		// However it is more consistent to ensure the disk exists,
-		// and in future we may gather addition information (e.g. disk type, IOPS etc)
-		zoneSet, err := volumehelpers.LabelZonesToSet(zone)
-		if err != nil {
-			klog.Warningf("Failed to parse zone field: %q. Will use raw field.", zone)
-		}
-
-		if len(zoneSet) > 1 {
-			// Regional PD
-			disk, err = g.getRegionalDiskByName(name)
-			if err != nil {
-				return nil, err
-			}
-		} else {
-			// Zonal PD
-			disk, err = g.getDiskByName(name, zone)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-
+func (g *Cloud) GetAutoLabelsForPD(disk *Disk) (map[string]string, error) {
 	labels := make(map[string]string)
 	switch zoneInfo := disk.ZoneInfo.(type) {
 	case singleZone:
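The hunk above ends mid-function; for orientation, here is a sketch of how the rewritten GetAutoLabelsForPD plausibly continues inside the gce package. The two zoneInfo branches are inferred from the singleZone/multiZone types and from the TestGetAutoLabelsForPD cases added below, so the field names zoneInfo.zone and zoneInfo.replicaZones, and the exact error strings, are assumptions rather than text quoted from this diff:

func (g *Cloud) GetAutoLabelsForPD(disk *Disk) (map[string]string, error) {
	labels := make(map[string]string)
	switch zoneInfo := disk.ZoneInfo.(type) {
	case singleZone:
		// Zonal PD: the tests below expect an error for an empty zone or region.
		if zoneInfo.zone == "" || disk.Region == "" {
			return nil, fmt.Errorf("PD did not have zone/region information: %v", disk)
		}
		labels[v1.LabelZoneFailureDomain] = zoneInfo.zone
		labels[v1.LabelZoneRegion] = disk.Region
	case multiZone:
		// Regional PD: replica zones fold into one label value such as
		// "us-west1-a__us-west1-b" (zone order is nondeterministic per the test).
		if zoneInfo.replicaZones == nil || zoneInfo.replicaZones.Len() <= 0 {
			return nil, fmt.Errorf("PD is regional but does not have any replicaZones: %v", disk)
		}
		labels[v1.LabelZoneFailureDomain] = volumehelpers.ZonesSetToLabelValue(zoneInfo.replicaZones)
		labels[v1.LabelZoneRegion] = disk.Region
	case nil:
		// The "no ZoneInfo" test case below expects an error here as well.
		return nil, fmt.Errorf("PD did not have ZoneInfo: %v", disk)
	}
	return labels, nil
}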
@@ -19,6 +19,7 @@ limitations under the License.
 package gce
 
 import (
+	"context"
 	"testing"
 
 	"fmt"
@@ -28,6 +29,7 @@ import (
 	compute "google.golang.org/api/compute/v1"
 	"google.golang.org/api/googleapi"
 	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	cloudprovider "k8s.io/cloud-provider"
 )
@@ -63,7 +65,7 @@ func TestCreateDisk_Basic(t *testing.T) {
 	expectedDescription := "{\"test-tag\":\"test-value\"}"
 
 	/* Act */
-	err := gce.CreateDisk(diskName, diskType, zone, sizeGb, tags)
+	_, err := gce.CreateDisk(diskName, diskType, zone, sizeGb, tags)
 
 	/* Assert */
 	if err != nil {
@@ -117,7 +119,7 @@ func TestCreateRegionalDisk_Basic(t *testing.T) {
 	expectedDescription := "{\"test-tag\":\"test-value\"}"
 
 	/* Act */
-	err := gce.CreateRegionalDisk(diskName, diskType, replicaZones, sizeGb, tags)
+	_, err := gce.CreateRegionalDisk(diskName, diskType, replicaZones, sizeGb, tags)
 
 	/* Assert */
 	if err != nil {
@@ -166,7 +168,7 @@ func TestCreateDisk_DiskAlreadyExists(t *testing.T) {
 	}
 
 	/* Act */
-	err := gce.CreateDisk("disk", DiskTypeSSD, "zone1", 128, nil)
+	_, err := gce.CreateDisk("disk", DiskTypeSSD, "zone1", 128, nil)
 
 	/* Assert */
 	if err != nil {
@@ -192,7 +194,7 @@ func TestCreateDisk_WrongZone(t *testing.T) {
 	const sizeGb int64 = 128
 
 	/* Act */
-	err := gce.CreateDisk(diskName, diskType, "zone2", sizeGb, nil)
+	_, err := gce.CreateDisk(diskName, diskType, "zone2", sizeGb, nil)
 
 	/* Assert */
 	if err == nil {
@@ -217,7 +219,7 @@ func TestCreateDisk_NoManagedZone(t *testing.T) {
 	const sizeGb int64 = 128
 
 	/* Act */
-	err := gce.CreateDisk(diskName, diskType, "zone1", sizeGb, nil)
+	_, err := gce.CreateDisk(diskName, diskType, "zone1", sizeGb, nil)
 
 	/* Assert */
 	if err == nil {
@@ -242,7 +244,7 @@ func TestCreateDisk_BadDiskType(t *testing.T) {
 	const sizeGb int64 = 128
 
 	/* Act */
-	err := gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
+	_, err := gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
 
 	/* Assert */
 	if err == nil {
@@ -272,7 +274,7 @@ func TestCreateDisk_MultiZone(t *testing.T) {
 	/* Act & Assert */
 	for _, zone := range gce.managedZones {
 		diskName = zone + "disk"
-		err := gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
+		_, err := gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
 		if err != nil {
 			t.Errorf("Error creating disk in zone '%v'; error: \"%v\"", zone, err)
 		}
@@ -437,7 +439,25 @@ func TestDeleteDisk_DiffDiskMultiZone(t *testing.T) {
 	}
 }
 
-func TestGetAutoLabelsForPD_Basic(t *testing.T) {
+func pv(name, zone string) *v1.PersistentVolume {
+	return &v1.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Labels: map[string]string{
+				v1.LabelZoneFailureDomain: zone,
+			},
+		},
+		Spec: v1.PersistentVolumeSpec{
+			PersistentVolumeSource: v1.PersistentVolumeSource{
+				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
+					PDName: name,
+				},
+			},
+		},
+	}
+}
+
+func TestGetLabelsForVolume_Basic(t *testing.T) {
+	ctx := context.Background()
 	/* Arrange */
 	gceProjectID := "test-project"
 	gceRegion := "us-central1"
@@ -457,9 +477,8 @@ func TestGetAutoLabelsForPD_Basic(t *testing.T) {
 	}
 
 	gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
-
 	/* Act */
-	labels, err := gce.GetAutoLabelsForPD(diskName, zone)
+	labels, err := gce.GetLabelsForVolume(ctx, pv(diskName, zone))
 
 	/* Assert */
 	if err != nil {
@@ -474,7 +493,8 @@
 	}
 }
 
-func TestGetAutoLabelsForPD_NoZone(t *testing.T) {
+func TestGetLabelsForVolume_NoZone(t *testing.T) {
+	ctx := context.Background()
 	/* Arrange */
 	gceProjectID := "test-project"
 	gceRegion := "europe-west1"
@@ -494,8 +514,11 @@ func TestGetAutoLabelsForPD_NoZone(t *testing.T) {
 	}
 	gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
 
+	pv := pv(diskName, zone)
+	delete(pv.Labels, v1.LabelZoneFailureDomain)
+
 	/* Act */
-	labels, err := gce.GetAutoLabelsForPD(diskName, "")
+	labels, err := gce.GetLabelsForVolume(ctx, pv)
 
 	/* Assert */
 	if err != nil {
@@ -510,7 +533,8 @@
 	}
 }
 
-func TestGetAutoLabelsForPD_DiskNotFound(t *testing.T) {
+func TestGetLabelsForVolume_DiskNotFound(t *testing.T) {
+	ctx := context.Background()
 	/* Arrange */
 	gceProjectID := "test-project"
 	gceRegion := "fake-region"
@@ -524,7 +548,7 @@ func TestGetAutoLabelsForPD_DiskNotFound(t *testing.T) {
 		nodeInformerSynced: func() bool { return true }}
 
 	/* Act */
-	_, err := gce.GetAutoLabelsForPD(diskName, zone)
+	_, err := gce.GetLabelsForVolume(ctx, pv(diskName, zone))
 
 	/* Assert */
 	if err == nil {
@@ -532,10 +556,12 @@
 	}
 }
 
-func TestGetAutoLabelsForPD_DiskNotFoundAndNoZone(t *testing.T) {
+func TestGetLabelsForVolume_DiskNotFoundAndNoZone(t *testing.T) {
+	ctx := context.Background()
 	/* Arrange */
 	gceProjectID := "test-project"
 	gceRegion := "fake-region"
+	zone := "asia-northeast1-a"
 	zonesWithNodes := []string{}
 	fakeManager := newFakeManager(gceProjectID, gceRegion)
 	diskName := "disk"
@@ -548,8 +574,11 @@ func TestGetAutoLabelsForPD_DiskNotFoundAndNoZone(t *testing.T) {
 		nodeInformerSynced: func() bool { return true },
 	}
 
+	pv := pv(diskName, zone)
+	delete(pv.Labels, v1.LabelZoneFailureDomain)
+
 	/* Act */
-	_, err := gce.GetAutoLabelsForPD(diskName, "")
+	_, err := gce.GetLabelsForVolume(ctx, pv)
 
 	/* Assert */
 	if err == nil {
@@ -557,7 +586,8 @@
 	}
 }
 
-func TestGetAutoLabelsForPD_DupDisk(t *testing.T) {
+func TestGetLabelsForVolume_DupDisk(t *testing.T) {
+	ctx := context.Background()
 	/* Arrange */
 	gceProjectID := "test-project"
 	gceRegion := "us-west1"
@@ -581,7 +611,7 @@ func TestGetAutoLabelsForPD_DupDisk(t *testing.T) {
 	}
 
 	/* Act */
-	labels, err := gce.GetAutoLabelsForPD(diskName, zone)
+	labels, err := gce.GetLabelsForVolume(ctx, pv(diskName, zone))
 
 	/* Assert */
 	if err != nil {
@@ -596,13 +626,15 @@
 	}
 }
 
-func TestGetAutoLabelsForPD_DupDiskNoZone(t *testing.T) {
+func TestGetLabelsForVolume_DupDiskNoZone(t *testing.T) {
+	ctx := context.Background()
 	/* Arrange */
 	gceProjectID := "test-project"
 	gceRegion := "fake-region"
+	zonesWithNodes := []string{"us-west1-b", "asia-southeast1-a"}
 	fakeManager := newFakeManager(gceProjectID, gceRegion)
 	diskName := "disk"
 	zone := "us-west1-b"
 	diskType := DiskTypeStandard
 	const sizeGb int64 = 128
 
@@ -618,8 +650,11 @@ func TestGetAutoLabelsForPD_DupDiskNoZone(t *testing.T) {
 		gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
 	}
 
+	pv := pv(diskName, zone)
+	delete(pv.Labels, v1.LabelZoneFailureDomain)
+
 	/* Act */
-	_, err := gce.GetAutoLabelsForPD(diskName, "")
+	_, err := gce.GetLabelsForVolume(ctx, pv)
 
 	/* Assert */
 	if err == nil {
@@ -627,6 +662,102 @@
 	}
 }
 
+func TestGetAutoLabelsForPD(t *testing.T) {
+	zonesWithNodes := []string{"us-west1-b", "asia-southeast1-a"}
+	gceRegion := "us-west1"
+	gceProjectID := "test-project"
+	fakeManager := newFakeManager(gceProjectID, gceRegion)
+	alphaFeatureGate := NewAlphaFeatureGate([]string{})
+	diskName := "disk"
+	zone1 := "us-west1-b"
+	zone2 := "us-west1-a"
+	const sizeGb int64 = 128
+
+	gce := Cloud{
+		manager:            fakeManager,
+		managedZones:       zonesWithNodes,
+		AlphaFeatureGate:   alphaFeatureGate,
+		nodeZones:          createNodeZones(zonesWithNodes),
+		nodeInformerSynced: func() bool { return true },
+	}
+
+	testCases := []struct {
+		name          string
+		zoneInfo      zoneType
+		region        string
+		wantZoneLabel sets.String
+		wantErr       bool
+	}{
+		{
+			name:          "basic singleZone",
+			zoneInfo:      singleZone{zone1},
+			region:        gceRegion,
+			wantZoneLabel: sets.NewString(zone1),
+		},
+		{
+			name:     "basic multiZone",
+			zoneInfo: multiZone{sets.NewString(zone1, zone2)},
+			region:   gceRegion,
+			// Order of zones in the label is nondeterministic.
+			wantZoneLabel: sets.NewString("us-west1-a__us-west1-b", "us-west1-b__us-west1-a"),
+		},
+		{
+			name:     "empty singleZone",
+			zoneInfo: singleZone{},
+			region:   gceRegion,
+			wantErr:  true,
+		},
+		{
+			name:     "empty region singleZone",
+			zoneInfo: singleZone{zone1},
+			region:   "",
+			wantErr:  true,
+		},
+		{
+			name:     "empty zone set multiZone",
+			zoneInfo: multiZone{sets.NewString()},
+			region:   gceRegion,
+			wantErr:  true,
+		},
+		{
+			name:     "no ZoneInfo",
+			zoneInfo: nil,
+			region:   gceRegion,
+			wantErr:  true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			disk := &Disk{
+				ZoneInfo: tc.zoneInfo,
+				Region:   tc.region,
+				Name:     diskName,
+				SizeGb:   sizeGb,
+			}
+
+			labels, err := gce.GetAutoLabelsForPD(disk)
+
+			if gotErr := err != nil; gotErr != tc.wantErr {
+				t.Errorf("gce.GetAutoLabelsForPD(%+v) = %v; wantErr: %v", disk, err, tc.wantErr)
+			}
+			if err != nil {
+				return
+			}
+
+			if got := labels[v1.LabelZoneFailureDomain]; !tc.wantZoneLabel.Has(got) {
+				t.Errorf("labels[v1.LabelZoneFailureDomain] = %v; want one of: %v", got, tc.wantZoneLabel.List())
+			}
+
+			// Validate labels
+			if got := labels[v1.LabelZoneRegion]; got != gceRegion {
+				t.Errorf("labels[v1.LabelZoneRegion] = %v; want: %v", got, gceRegion)
+			}
+		})
+	}
+}
+
 type targetClientAPI int
 
 const (
@@ -673,7 +804,7 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
 	sizeGb int64,
 	tagsStr string,
 	diskType string,
-	zone string) error {
+	zone string) (*Disk, error) {
 	manager.createDiskCalled = true
 
 	switch t := manager.targetAPI; t {
@@ -687,7 +818,7 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
 		}
 		manager.diskToCreateStable = diskToCreateV1
 		manager.zonalDisks[zone] = diskToCreateV1.Name
-		return nil
+		return nil, nil
 	case targetBeta:
 		diskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(diskTypeURITemplateSingleZone, manager.gceProjectID, zone, diskType)
 		diskToCreateBeta := &computebeta.Disk{
@@ -698,7 +829,7 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
 		}
 		manager.diskToCreateBeta = diskToCreateBeta
 		manager.zonalDisks[zone] = diskToCreateBeta.Name
-		return nil
+		return nil, nil
 	case targetAlpha:
 		diskTypeURI := gceComputeAPIEndpointBeta + "projects/" + fmt.Sprintf(diskTypeURITemplateSingleZone, manager.gceProjectID, zone, diskType)
 		diskToCreateAlpha := &computealpha.Disk{
@@ -709,9 +840,9 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
 		}
 		manager.diskToCreateAlpha = diskToCreateAlpha
 		manager.zonalDisks[zone] = diskToCreateAlpha.Name
-		return nil
+		return nil, nil
 	default:
-		return fmt.Errorf("unexpected type: %T", t)
+		return nil, fmt.Errorf("unexpected type: %T", t)
 	}
 }
 
@@ -724,7 +855,7 @@ func (manager *FakeServiceManager) CreateRegionalDiskOnCloudProvider(
 	sizeGb int64,
 	tagsStr string,
 	diskType string,
-	zones sets.String) error {
+	zones sets.String) (*Disk, error) {
 
 	manager.createDiskCalled = true
 	diskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(diskTypeURITemplateRegional, manager.gceProjectID, manager.gceRegion, diskType)
@@ -738,9 +869,9 @@ func (manager *FakeServiceManager) CreateRegionalDiskOnCloudProvider(
 		}
 		manager.diskToCreateStable = diskToCreateV1
 		manager.regionalDisks[diskToCreateV1.Name] = zones
-		return nil
+		return nil, nil
 	default:
-		return fmt.Errorf("unexpected type: %T", t)
+		return nil, fmt.Errorf("unexpected type: %T", t)
 	}
 }
@@ -216,7 +216,7 @@ func (p *Provider) CreatePD(zone string) (string, error) {
 	}
 
 	tags := map[string]string{}
-	if err := p.gceCloud.CreateDisk(pdName, gcecloud.DiskTypeStandard, zone, 2 /* sizeGb */, tags); err != nil {
+	if _, err := p.gceCloud.CreateDisk(pdName, gcecloud.DiskTypeStandard, zone, 2 /* sizeGb */, tags); err != nil {
 		return "", err
 	}
 	return pdName, nil