Merge pull request #44884 from verult/master

Automatic merge from submit-queue (batch tested with PRs 46383, 45645, 45923, 44884, 46294)

Created unit tests for the GCE cloud provider storage interface.

- Currently covers CreateDisk, DeleteDisk, and GetAutoLabelsForPD.
- Created a ServiceManager interface in gce.go to facilitate mocking in tests (see the sketch below).
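
To illustrate the mocking this enables, here is a minimal sketch in the style of the tests in this PR. It assumes it lives in package `gce` alongside `gce.go`; the `fakeServiceManager` type and the test name are illustrative only and are not part of this commit (the real fake, `FakeServiceManager`, is in `gce_disks_test.go` further down):

```go
package gce

import (
	"testing"

	compute "google.golang.org/api/compute/v1"
)

// fakeServiceManager is an illustrative stand-in that records calls instead of
// hitting the GCE API. It satisfies the new ServiceManager interface.
type fakeServiceManager struct {
	createDiskCalled bool
}

func (f *fakeServiceManager) CreateDisk(project, zone string, disk *compute.Disk) (*compute.Operation, error) {
	f.createDiskCalled = true
	return &compute.Operation{}, nil
}

func (f *fakeServiceManager) GetDisk(project, zone, diskName string) (*compute.Disk, error) {
	return &compute.Disk{Name: diskName, Zone: zone}, nil
}

func (f *fakeServiceManager) DeleteDisk(project, zone, disk string) (*compute.Operation, error) {
	return &compute.Operation{}, nil
}

func (f *fakeServiceManager) WaitForZoneOp(op *compute.Operation, zone string, mc *metricContext) error {
	return nil
}

func TestCreateDiskGoesThroughManager(t *testing.T) {
	// Inject the fake in place of the real GCE API client.
	fake := &fakeServiceManager{}
	gce := GCECloud{manager: fake, managedZones: []string{"zone1"}}

	if err := gce.CreateDisk("disk", DiskTypeSSD, "zone1", 128, nil); err != nil {
		t.Fatal(err)
	}
	if !fake.createDiskCalled {
		t.Error("expected CreateDisk to be routed through the injected ServiceManager")
	}
}
```

The FakeServiceManager used by the actual tests additionally tracks the returned operation, the disks created per zone, and an injectable WaitForZoneOp error.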



**What this PR does / why we need it**:
Increasing test coverage for GCE Persistent Disk.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #44573 

**Release note**:

```release-note
NONE
```
Kubernetes Submit Queue 2017-05-26 12:58:05 -07:00 committed by GitHub
commit f8cfeef174
4 changed files with 569 additions and 8 deletions


@@ -68,9 +68,18 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["gce_test.go"],
srcs = [
"gce_disks_test.go",
"gce_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/cloudprovider:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],
)
filegroup(


@@ -91,6 +91,25 @@ type GCECloud struct {
nodeInstancePrefix string // If non-"", an advisory prefix for all nodes in the cluster
useMetadataServer bool
operationPollRateLimiter flowcontrol.RateLimiter
manager ServiceManager
}
type ServiceManager interface {
// Creates a new persistent disk on GCE with the given disk spec.
CreateDisk(project string, zone string, disk *compute.Disk) (*compute.Operation, error)
// Gets the persistent disk from GCE with the given diskName.
GetDisk(project string, zone string, diskName string) (*compute.Disk, error)
// Deletes the persistent disk from GCE with the given diskName.
DeleteDisk(project string, zone string, disk string) (*compute.Operation, error)
// Waits until GCE reports the given operation in the given zone as done.
WaitForZoneOp(op *compute.Operation, zone string, mc *metricContext) error
}
type GCEServiceManager struct {
gce *GCECloud
}
type Config struct {
@@ -230,7 +249,7 @@ func CreateGCECloud(projectID, region, zone string, managedZones []string, netwo
operationPollRateLimiter := flowcontrol.NewTokenBucketRateLimiter(10, 100) // 10 qps, 100 bucket size.
return &GCECloud{
gce := &GCECloud{
service: service,
serviceBeta: serviceBeta,
containerService: containerService,
@@ -244,7 +263,10 @@ func CreateGCECloud(projectID, region, zone string, managedZones []string, netwo
nodeInstancePrefix: nodeInstancePrefix,
useMetadataServer: useMetadataServer,
operationPollRateLimiter: operationPollRateLimiter,
}, nil
}
gce.manager = &GCEServiceManager{gce}
return gce, nil
}
// Initialize takes in a clientBuilder and spawns a goroutine for watching the clusterid configmap.
@@ -383,3 +405,31 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) {
return oauth2.NewClient(oauth2.NoContext, tokenSource), nil
}
func (manager *GCEServiceManager) CreateDisk(
project string,
zone string,
disk *compute.Disk) (*compute.Operation, error) {
return manager.gce.service.Disks.Insert(project, zone, disk).Do()
}
func (manager *GCEServiceManager) GetDisk(
project string,
zone string,
diskName string) (*compute.Disk, error) {
return manager.gce.service.Disks.Get(project, zone, diskName).Do()
}
func (manager *GCEServiceManager) DeleteDisk(
project string,
zone string,
diskName string) (*compute.Operation, error) {
return manager.gce.service.Disks.Delete(project, zone, diskName).Do()
}
func (manager *GCEServiceManager) WaitForZoneOp(op *compute.Operation, zone string, mc *metricContext) error {
return manager.gce.waitForZoneOp(op, zone, mc)
}


@@ -244,7 +244,7 @@ func (gce *GCECloud) CreateDisk(
}
mc := newDiskMetricContext("create", zone)
createOp, err := gce.service.Disks.Insert(gce.projectID, zone, diskToCreate).Do()
createOp, err := gce.manager.CreateDisk(gce.projectID, zone, diskToCreate)
if isGCEError(err, "alreadyExists") {
glog.Warningf("GCE PD %q already exists, reusing", name)
return nil
@@ -252,7 +252,7 @@ func (gce *GCECloud) CreateDisk(
return mc.Observe(err)
}
err = gce.waitForZoneOp(createOp, zone, mc)
err = gce.manager.WaitForZoneOp(createOp, zone, mc)
if isGCEError(err, "alreadyExists") {
glog.Warningf("GCE PD %q already exists, reusing", name)
return nil
@@ -323,7 +323,7 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st
// If not found, returns (nil, nil)
func (gce *GCECloud) findDiskByName(diskName string, zone string) (*GCEDisk, error) {
mc := newDiskMetricContext("get", zone)
disk, err := gce.service.Disks.Get(gce.projectID, zone, diskName).Do()
disk, err := gce.manager.GetDisk(gce.projectID, zone, diskName)
if err == nil {
d := &GCEDisk{
Zone: lastComponent(disk.Zone),
@@ -410,12 +410,12 @@ func (gce *GCECloud) doDeleteDisk(diskToDelete string) error {
mc := newDiskMetricContext("delete", disk.Zone)
deleteOp, err := gce.service.Disks.Delete(gce.projectID, disk.Zone, disk.Name).Do()
deleteOp, err := gce.manager.DeleteDisk(gce.projectID, disk.Zone, disk.Name)
if err != nil {
return mc.Observe(err)
}
return gce.waitForZoneOp(deleteOp, disk.Zone, mc)
return gce.manager.WaitForZoneOp(deleteOp, disk.Zone, mc)
}
// Converts a Disk resource to an AttachedDisk resource.


@@ -0,0 +1,502 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce
import (
"testing"
"fmt"
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
)
func TestCreateDisk_Basic(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
projectId := "test-project"
gce := GCECloud{
manager: fakeManager,
managedZones: []string{"zone1"},
projectID: projectId,
}
diskName := "disk"
diskType := DiskTypeSSD
zone := "zone1"
const sizeGb int64 = 128
tags := make(map[string]string)
tags["test-tag"] = "test-value"
diskTypeUri := fmt.Sprintf(diskTypeUriTemplate, projectId, zone, diskType)
expectedDescription := "{\"test-tag\":\"test-value\"}"
/* Act */
err := gce.CreateDisk(diskName, diskType, zone, sizeGb, tags)
/* Assert */
if err != nil {
t.Error(err)
}
if !fakeManager.createDiskCalled {
t.Error("Never called GCE disk create.")
}
if !fakeManager.doesOpMatch {
t.Error("Ops used in WaitForZoneOp does not match what's returned by CreateDisk.")
}
// Partial check of equality between disk description sent to GCE and parameters of method.
diskToCreate := fakeManager.diskToCreate
if diskToCreate.Name != diskName {
t.Errorf("Expected disk name: %s; Actual: %s", diskName, diskToCreate.Name)
}
if diskToCreate.Type != diskTypeUri {
t.Errorf("Expected disk type: %s; Actual: %s", diskTypeUri, diskToCreate.Type)
}
if diskToCreate.SizeGb != sizeGb {
t.Errorf("Expected disk size: %d; Actual: %d", sizeGb, diskToCreate.SizeGb)
}
if diskToCreate.Description != expectedDescription {
t.Errorf("Expected tag string: %s; Actual: %s", expectedDescription, diskToCreate.Description)
}
}
func TestCreateDisk_DiskAlreadyExists(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}
// Inject disk AlreadyExists error.
alreadyExistsError := googleapi.ErrorItem{Reason: "alreadyExists"}
fakeManager.waitForZoneOpError = &googleapi.Error{
Errors: []googleapi.ErrorItem{alreadyExistsError},
}
/* Act */
err := gce.CreateDisk("disk", DiskTypeSSD, "zone1", 128, nil)
/* Assert */
if err != nil {
t.Error(
"Expected success when a disk with the given name already exists, but an error is returned.")
}
}
func TestCreateDisk_WrongZone(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}
diskName := "disk"
diskType := DiskTypeSSD
const sizeGb int64 = 128
/* Act */
err := gce.CreateDisk(diskName, diskType, "zone2", sizeGb, nil)
/* Assert */
if err == nil {
t.Error("Expected error when zone is not managed, but none returned.")
}
}
func TestCreateDisk_NoManagedZone(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
gce := GCECloud{manager: fakeManager, managedZones: []string{}}
diskName := "disk"
diskType := DiskTypeSSD
const sizeGb int64 = 128
/* Act */
err := gce.CreateDisk(diskName, diskType, "zone1", sizeGb, nil)
/* Assert */
if err == nil {
t.Error("Expected error when managedZones is empty, but none returned.")
}
}
func TestCreateDisk_BadDiskType(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}
diskName := "disk"
diskType := "arbitrary-disk"
zone := "zone1"
const sizeGb int64 = 128
/* Act */
err := gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
/* Assert */
if err == nil {
t.Error("Expected error when disk type is not supported, but none returned.")
}
}
func TestCreateDisk_MultiZone(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1", "zone2", "zone3"}}
diskName := "disk"
diskType := DiskTypeStandard
const sizeGb int64 = 128
/* Act & Assert */
for _, zone := range gce.managedZones {
diskName = zone + "disk"
err := gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
if err != nil {
t.Errorf("Error creating disk in zone '%v'; error: \"%v\"", zone, err)
}
}
}
func TestDeleteDisk_Basic(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}
diskName := "disk"
diskType := DiskTypeSSD
zone := "zone1"
const sizeGb int64 = 128
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
/* Act */
err := gce.DeleteDisk(diskName)
/* Assert */
if err != nil {
t.Error(err)
}
if !fakeManager.deleteDiskCalled {
t.Error("Never called GCE disk delete.")
}
if !fakeManager.doesOpMatch {
t.Error("Ops used in WaitForZoneOp does not match what's returned by DeleteDisk.")
}
}
func TestDeleteDisk_NotFound(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}
diskName := "disk"
/* Act */
err := gce.DeleteDisk(diskName)
/* Assert */
if err != nil {
t.Error("Expected successful operation when disk is not found, but an error is returned.")
}
}
func TestDeleteDisk_ResourceBeingUsed(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}
diskName := "disk"
diskType := DiskTypeSSD
zone := "zone1"
const sizeGb int64 = 128
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
fakeManager.resourceInUse = true
/* Act */
err := gce.DeleteDisk(diskName)
/* Assert */
if err == nil {
t.Error("Expected error when disk is in use, but none returned.")
}
}
func TestDeleteDisk_SameDiskMultiZone(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1", "zone2", "zone3"}}
diskName := "disk"
diskType := DiskTypeSSD
const sizeGb int64 = 128
for _, zone := range gce.managedZones {
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
}
/* Act */
// DeleteDisk will call FakeServiceManager.GetDisk() with all zones,
// and FakeServiceManager.GetDisk() always returns a disk,
// so DeleteDisk thinks a disk with diskName exists in all zones.
err := gce.DeleteDisk(diskName)
/* Assert */
if err == nil {
t.Error("Expected error when disk is found in multiple zones, but none returned.")
}
}
func TestDeleteDisk_DiffDiskMultiZone(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}
diskName := "disk"
diskType := DiskTypeSSD
const sizeGb int64 = 128
for _, zone := range gce.managedZones {
diskName = zone + "disk"
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
}
/* Act & Assert */
var err error
for _, zone := range gce.managedZones {
diskName = zone + "disk"
err = gce.DeleteDisk(diskName)
if err != nil {
t.Errorf("Error deleting disk in zone '%v'; error: \"%v\"", zone, err)
}
}
}
func TestGetAutoLabelsForPD_Basic(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
diskName := "disk"
diskType := DiskTypeSSD
zone := "us-central1-c"
const sizeGb int64 = 128
gce := GCECloud{manager: fakeManager, managedZones: []string{zone}}
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
/* Act */
labels, err := gce.GetAutoLabelsForPD(diskName, zone)
/* Assert */
if err != nil {
t.Error(err)
}
if labels[metav1.LabelZoneFailureDomain] != zone {
t.Errorf("Failure domain is '%v', but zone is '%v'",
labels[metav1.LabelZoneFailureDomain], zone)
}
if labels[metav1.LabelZoneRegion] != "us-central1" {
t.Errorf("Region is '%v', but zone is 'us-central1'", labels[metav1.LabelZoneRegion])
}
}
func TestGetAutoLabelsForPD_NoZone(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
diskName := "disk"
diskType := DiskTypeStandard
zone := "europe-west1-d"
const sizeGb int64 = 128
gce := GCECloud{manager: fakeManager, managedZones: []string{zone}}
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
/* Act */
labels, err := gce.GetAutoLabelsForPD(diskName, "")
/* Assert */
if err != nil {
t.Error(err)
}
if labels[metav1.LabelZoneFailureDomain] != zone {
t.Errorf("Failure domain is '%v', but zone is '%v'",
labels[metav1.LabelZoneFailureDomain], zone)
}
if labels[metav1.LabelZoneRegion] != "europe-west1" {
t.Errorf("Region is '%v', but zone is 'europe-west1'", labels[metav1.LabelZoneRegion])
}
}
func TestGetAutoLabelsForPD_DiskNotFound(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
diskName := "disk"
zone := "asia-northeast1-a"
gce := GCECloud{manager: fakeManager, managedZones: []string{zone}}
/* Act */
_, err := gce.GetAutoLabelsForPD(diskName, zone)
/* Assert */
if err == nil {
t.Error("Expected error when the specified disk does not exist, but none returned.")
}
}
func TestGetAutoLabelsForPD_DiskNotFoundAndNoZone(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
diskName := "disk"
gce := GCECloud{manager: fakeManager, managedZones: []string{}}
/* Act */
_, err := gce.GetAutoLabelsForPD(diskName, "")
/* Assert */
if err == nil {
t.Error("Expected error when the specified disk does not exist, but none returned.")
}
}
func TestGetAutoLabelsForPD_DupDisk(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
diskName := "disk"
diskType := DiskTypeStandard
zone := "us-west1-b"
const sizeGb int64 = 128
gce := GCECloud{manager: fakeManager, managedZones: []string{"us-west1-b", "asia-southeast1-a"}}
for _, zone := range gce.managedZones {
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
}
/* Act */
labels, err := gce.GetAutoLabelsForPD(diskName, zone)
/* Assert */
if err != nil {
t.Error("Disk name and zone uniquely identifies a disk, yet an error is returned.")
}
if labels[metav1.LabelZoneFailureDomain] != zone {
t.Errorf("Failure domain is '%v', but zone is '%v'",
labels[metav1.LabelZoneFailureDomain], zone)
}
if labels[metav1.LabelZoneRegion] != "us-west1" {
t.Errorf("Region is '%v', but zone is 'us-west1'", labels[metav1.LabelZoneRegion])
}
}
func TestGetAutoLabelsForPD_DupDiskNoZone(t *testing.T) {
/* Arrange */
fakeManager := newFakeManager()
diskName := "disk"
diskType := DiskTypeStandard
const sizeGb int64 = 128
gce := GCECloud{manager: fakeManager, managedZones: []string{"us-west1-b", "asia-southeast1-a"}}
for _, zone := range gce.managedZones {
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
}
/* Act */
_, err := gce.GetAutoLabelsForPD(diskName, "")
/* Assert */
if err == nil {
t.Error("Expected error when the disk is duplicated and zone is not specified, but none returned.")
}
}
type FakeServiceManager struct {
// Common fields shared among tests
op *compute.Operation // Mocks an operation returned by GCE API calls
doesOpMatch bool
disks map[string]string // zone: diskName
waitForZoneOpError error // Error to be returned by WaitForZoneOp
// Fields for TestCreateDisk
createDiskCalled bool
diskToCreate *compute.Disk
// Fields for TestDeleteDisk
deleteDiskCalled bool
resourceInUse bool // Marks the disk as in-use
}
func newFakeManager() *FakeServiceManager {
return &FakeServiceManager{disks: make(map[string]string)}
}
/**
* Upon disk creation, disk info is stored in FakeServiceManager
* to be used by other tested methods.
*/
func (manager *FakeServiceManager) CreateDisk(
project string,
zone string,
disk *compute.Disk) (*compute.Operation, error) {
manager.createDiskCalled = true
op := &compute.Operation{}
manager.op = op
manager.diskToCreate = disk
manager.disks[zone] = disk.Name
return op, nil
}
/**
* Gets disk info stored in the FakeServiceManager.
*/
func (manager *FakeServiceManager) GetDisk(
project string,
zone string,
diskName string) (*compute.Disk, error) {
if manager.disks[zone] == "" {
return nil, cloudprovider.DiskNotFound
}
if manager.resourceInUse {
errorItem := googleapi.ErrorItem{Reason: "resourceInUseByAnotherResource"}
err := &googleapi.Error{Errors: []googleapi.ErrorItem{errorItem}}
return nil, err
}
disk := &compute.Disk{Name: diskName, Zone: zone, Kind: "compute#disk"}
return disk, nil
}
/**
* Disk info is removed from the FakeServiceManager.
*/
func (manager *FakeServiceManager) DeleteDisk(
project string,
zone string,
disk string) (*compute.Operation, error) {
manager.deleteDiskCalled = true
op := &compute.Operation{}
manager.op = op
manager.disks[zone] = ""
return op, nil
}
func (manager *FakeServiceManager) WaitForZoneOp(
op *compute.Operation,
zone string,
mc *metricContext) error {
if op == manager.op {
manager.doesOpMatch = true
}
return manager.waitForZoneOpError
}