Merge pull request #70716 from jingxu97/Nov/versionupdate

Move Regional PD to GA
This commit is contained in:
k8s-ci-robot 2018-11-08 23:45:45 -08:00 committed by GitHub
commit 8c2509de5b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 151 additions and 161 deletions

View File

@ -48,7 +48,7 @@ type Cloud interface {
RegionBackendServices() RegionBackendServices RegionBackendServices() RegionBackendServices
AlphaRegionBackendServices() AlphaRegionBackendServices AlphaRegionBackendServices() AlphaRegionBackendServices
Disks() Disks Disks() Disks
BetaRegionDisks() BetaRegionDisks RegionDisks() RegionDisks
Firewalls() Firewalls Firewalls() Firewalls
ForwardingRules() ForwardingRules ForwardingRules() ForwardingRules
AlphaForwardingRules() AlphaForwardingRules AlphaForwardingRules() AlphaForwardingRules
@ -89,7 +89,7 @@ func NewGCE(s *Service) *GCE {
gceRegionBackendServices: &GCERegionBackendServices{s}, gceRegionBackendServices: &GCERegionBackendServices{s},
gceAlphaRegionBackendServices: &GCEAlphaRegionBackendServices{s}, gceAlphaRegionBackendServices: &GCEAlphaRegionBackendServices{s},
gceDisks: &GCEDisks{s}, gceDisks: &GCEDisks{s},
gceBetaRegionDisks: &GCEBetaRegionDisks{s}, gceRegionDisks: &GCERegionDisks{s},
gceFirewalls: &GCEFirewalls{s}, gceFirewalls: &GCEFirewalls{s},
gceForwardingRules: &GCEForwardingRules{s}, gceForwardingRules: &GCEForwardingRules{s},
gceAlphaForwardingRules: &GCEAlphaForwardingRules{s}, gceAlphaForwardingRules: &GCEAlphaForwardingRules{s},
@ -134,7 +134,7 @@ type GCE struct {
gceRegionBackendServices *GCERegionBackendServices gceRegionBackendServices *GCERegionBackendServices
gceAlphaRegionBackendServices *GCEAlphaRegionBackendServices gceAlphaRegionBackendServices *GCEAlphaRegionBackendServices
gceDisks *GCEDisks gceDisks *GCEDisks
gceBetaRegionDisks *GCEBetaRegionDisks gceRegionDisks *GCERegionDisks
gceFirewalls *GCEFirewalls gceFirewalls *GCEFirewalls
gceForwardingRules *GCEForwardingRules gceForwardingRules *GCEForwardingRules
gceAlphaForwardingRules *GCEAlphaForwardingRules gceAlphaForwardingRules *GCEAlphaForwardingRules
@ -212,9 +212,9 @@ func (gce *GCE) Disks() Disks {
return gce.gceDisks return gce.gceDisks
} }
// BetaRegionDisks returns the interface for the beta RegionDisks. // RegionDisks returns the interface for the ga RegionDisks.
func (gce *GCE) BetaRegionDisks() BetaRegionDisks { func (gce *GCE) RegionDisks() RegionDisks {
return gce.gceBetaRegionDisks return gce.gceRegionDisks
} }
// Firewalls returns the interface for the ga Firewalls. // Firewalls returns the interface for the ga Firewalls.
@ -381,7 +381,7 @@ func NewMockGCE(projectRouter ProjectRouter) *MockGCE {
MockRegionBackendServices: NewMockRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), MockRegionBackendServices: NewMockRegionBackendServices(projectRouter, mockRegionBackendServicesObjs),
MockAlphaRegionBackendServices: NewMockAlphaRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), MockAlphaRegionBackendServices: NewMockAlphaRegionBackendServices(projectRouter, mockRegionBackendServicesObjs),
MockDisks: NewMockDisks(projectRouter, mockDisksObjs), MockDisks: NewMockDisks(projectRouter, mockDisksObjs),
MockBetaRegionDisks: NewMockBetaRegionDisks(projectRouter, mockRegionDisksObjs), MockRegionDisks: NewMockRegionDisks(projectRouter, mockRegionDisksObjs),
MockFirewalls: NewMockFirewalls(projectRouter, mockFirewallsObjs), MockFirewalls: NewMockFirewalls(projectRouter, mockFirewallsObjs),
MockForwardingRules: NewMockForwardingRules(projectRouter, mockForwardingRulesObjs), MockForwardingRules: NewMockForwardingRules(projectRouter, mockForwardingRulesObjs),
MockAlphaForwardingRules: NewMockAlphaForwardingRules(projectRouter, mockForwardingRulesObjs), MockAlphaForwardingRules: NewMockAlphaForwardingRules(projectRouter, mockForwardingRulesObjs),
@ -426,7 +426,7 @@ type MockGCE struct {
MockRegionBackendServices *MockRegionBackendServices MockRegionBackendServices *MockRegionBackendServices
MockAlphaRegionBackendServices *MockAlphaRegionBackendServices MockAlphaRegionBackendServices *MockAlphaRegionBackendServices
MockDisks *MockDisks MockDisks *MockDisks
MockBetaRegionDisks *MockBetaRegionDisks MockRegionDisks *MockRegionDisks
MockFirewalls *MockFirewalls MockFirewalls *MockFirewalls
MockForwardingRules *MockForwardingRules MockForwardingRules *MockForwardingRules
MockAlphaForwardingRules *MockAlphaForwardingRules MockAlphaForwardingRules *MockAlphaForwardingRules
@ -504,9 +504,9 @@ func (mock *MockGCE) Disks() Disks {
return mock.MockDisks return mock.MockDisks
} }
// BetaRegionDisks returns the interface for the beta RegionDisks. // RegionDisks returns the interface for the ga RegionDisks.
func (mock *MockGCE) BetaRegionDisks() BetaRegionDisks { func (mock *MockGCE) RegionDisks() RegionDisks {
return mock.MockBetaRegionDisks return mock.MockRegionDisks
} }
// Firewalls returns the interface for the ga Firewalls. // Firewalls returns the interface for the ga Firewalls.
@ -1084,15 +1084,15 @@ type MockRegionDisksObj struct {
Obj interface{} Obj interface{}
} }
// ToBeta retrieves the given version of the object. // ToGA retrieves the given version of the object.
func (m *MockRegionDisksObj) ToBeta() *beta.Disk { func (m *MockRegionDisksObj) ToGA() *ga.Disk {
if ret, ok := m.Obj.(*beta.Disk); ok { if ret, ok := m.Obj.(*ga.Disk); ok {
return ret return ret
} }
// Convert the object via JSON copying to the type that was requested. // Convert the object via JSON copying to the type that was requested.
ret := &beta.Disk{} ret := &ga.Disk{}
if err := copyViaJSON(ret, m.Obj); err != nil { if err := copyViaJSON(ret, m.Obj); err != nil {
glog.Errorf("Could not convert %T to *beta.Disk via JSON: %v", m.Obj, err) glog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err)
} }
return ret return ret
} }
@ -5110,18 +5110,18 @@ func (g *GCEDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResi
return err return err
} }
// BetaRegionDisks is an interface that allows for mocking of RegionDisks. // RegionDisks is an interface that allows for mocking of RegionDisks.
type BetaRegionDisks interface { type RegionDisks interface {
Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error)
List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error)
Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error
Delete(ctx context.Context, key *meta.Key) error Delete(ctx context.Context, key *meta.Key) error
Resize(context.Context, *meta.Key, *beta.RegionDisksResizeRequest) error Resize(context.Context, *meta.Key, *ga.RegionDisksResizeRequest) error
} }
// NewMockBetaRegionDisks returns a new mock for RegionDisks. // NewMockRegionDisks returns a new mock for RegionDisks.
func NewMockBetaRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) *MockBetaRegionDisks { func NewMockRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) *MockRegionDisks {
mock := &MockBetaRegionDisks{ mock := &MockRegionDisks{
ProjectRouter: pr, ProjectRouter: pr,
Objects: objs, Objects: objs,
@ -5132,8 +5132,8 @@ func NewMockBetaRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisks
return mock return mock
} }
// MockBetaRegionDisks is the mock for RegionDisks. // MockRegionDisks is the mock for RegionDisks.
type MockBetaRegionDisks struct { type MockRegionDisks struct {
Lock sync.Mutex Lock sync.Mutex
ProjectRouter ProjectRouter ProjectRouter ProjectRouter
@ -5152,11 +5152,11 @@ type MockBetaRegionDisks struct {
// order to add your own logic. Return (true, _, _) to prevent the normal // order to add your own logic. Return (true, _, _) to prevent the normal
// execution flow of the mock. Return (false, nil, nil) to continue with // execution flow of the mock. Return (false, nil, nil) to continue with
// normal mock behavior after the hook function executes. // normal mock behavior after the hook function executes.
GetHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionDisks) (bool, *beta.Disk, error) GetHook func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, *ga.Disk, error)
ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaRegionDisks) (bool, []*beta.Disk, error) ListHook func(ctx context.Context, region string, fl *filter.F, m *MockRegionDisks) (bool, []*ga.Disk, error)
InsertHook func(ctx context.Context, key *meta.Key, obj *beta.Disk, m *MockBetaRegionDisks) (bool, error) InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Disk, m *MockRegionDisks) (bool, error)
DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionDisks) (bool, error) DeleteHook func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, error)
ResizeHook func(context.Context, *meta.Key, *beta.RegionDisksResizeRequest, *MockBetaRegionDisks) error ResizeHook func(context.Context, *meta.Key, *ga.RegionDisksResizeRequest, *MockRegionDisks) error
// X is extra state that can be used as part of the mock. Generated code // X is extra state that can be used as part of the mock. Generated code
// will not use this field. // will not use this field.
@ -5164,10 +5164,10 @@ type MockBetaRegionDisks struct {
} }
// Get returns the object from the mock. // Get returns the object from the mock.
func (m *MockBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) { func (m *MockRegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) {
if m.GetHook != nil { if m.GetHook != nil {
if intercept, obj, err := m.GetHook(ctx, key, m); intercept { if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) glog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
return obj, err return obj, err
} }
} }
@ -5179,28 +5179,28 @@ func (m *MockBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Dis
defer m.Lock.Unlock() defer m.Lock.Unlock()
if err, ok := m.GetError[*key]; ok { if err, ok := m.GetError[*key]; ok {
glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) glog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err)
return nil, err return nil, err
} }
if obj, ok := m.Objects[*key]; ok { if obj, ok := m.Objects[*key]; ok {
typedObj := obj.ToBeta() typedObj := obj.ToGA()
glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) glog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
return typedObj, nil return typedObj, nil
} }
err := &googleapi.Error{ err := &googleapi.Error{
Code: http.StatusNotFound, Code: http.StatusNotFound,
Message: fmt.Sprintf("MockBetaRegionDisks %v not found", key), Message: fmt.Sprintf("MockRegionDisks %v not found", key),
} }
glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) glog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err)
return nil, err return nil, err
} }
// List all of the objects in the mock in the given region. // List all of the objects in the mock in the given region.
func (m *MockBetaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) { func (m *MockRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) {
if m.ListHook != nil { if m.ListHook != nil {
if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept {
glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) glog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
return objs, err return objs, err
} }
} }
@ -5210,31 +5210,31 @@ func (m *MockBetaRegionDisks) List(ctx context.Context, region string, fl *filte
if m.ListError != nil { if m.ListError != nil {
err := *m.ListError err := *m.ListError
glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) glog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
return nil, *m.ListError return nil, *m.ListError
} }
var objs []*beta.Disk var objs []*ga.Disk
for key, obj := range m.Objects { for key, obj := range m.Objects {
if key.Region != region { if key.Region != region {
continue continue
} }
if !fl.Match(obj.ToBeta()) { if !fl.Match(obj.ToGA()) {
continue continue
} }
objs = append(objs, obj.ToBeta()) objs = append(objs, obj.ToGA())
} }
glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) glog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
return objs, nil return objs, nil
} }
// Insert is a mock for inserting/creating a new object. // Insert is a mock for inserting/creating a new object.
func (m *MockBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error { func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error {
if m.InsertHook != nil { if m.InsertHook != nil {
if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) glog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err return err
} }
} }
@ -5246,32 +5246,32 @@ func (m *MockBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *be
defer m.Lock.Unlock() defer m.Lock.Unlock()
if err, ok := m.InsertError[*key]; ok { if err, ok := m.InsertError[*key]; ok {
glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) glog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err return err
} }
if _, ok := m.Objects[*key]; ok { if _, ok := m.Objects[*key]; ok {
err := &googleapi.Error{ err := &googleapi.Error{
Code: http.StatusConflict, Code: http.StatusConflict,
Message: fmt.Sprintf("MockBetaRegionDisks %v exists", key), Message: fmt.Sprintf("MockRegionDisks %v exists", key),
} }
glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) glog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err return err
} }
obj.Name = key.Name obj.Name = key.Name
projectID := m.ProjectRouter.ProjectID(ctx, "beta", "disks") projectID := m.ProjectRouter.ProjectID(ctx, "ga", "disks")
obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "disks", key) obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key)
m.Objects[*key] = &MockRegionDisksObj{obj} m.Objects[*key] = &MockRegionDisksObj{obj}
glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) glog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
return nil return nil
} }
// Delete is a mock for deleting the object. // Delete is a mock for deleting the object.
func (m *MockBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error { func (m *MockRegionDisks) Delete(ctx context.Context, key *meta.Key) error {
if m.DeleteHook != nil { if m.DeleteHook != nil {
if intercept, err := m.DeleteHook(ctx, key, m); intercept { if intercept, err := m.DeleteHook(ctx, key, m); intercept {
glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) glog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
return err return err
} }
} }
@ -5283,207 +5283,207 @@ func (m *MockBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error {
defer m.Lock.Unlock() defer m.Lock.Unlock()
if err, ok := m.DeleteError[*key]; ok { if err, ok := m.DeleteError[*key]; ok {
glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) glog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
return err return err
} }
if _, ok := m.Objects[*key]; !ok { if _, ok := m.Objects[*key]; !ok {
err := &googleapi.Error{ err := &googleapi.Error{
Code: http.StatusNotFound, Code: http.StatusNotFound,
Message: fmt.Sprintf("MockBetaRegionDisks %v not found", key), Message: fmt.Sprintf("MockRegionDisks %v not found", key),
} }
glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) glog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
return err return err
} }
delete(m.Objects, *key) delete(m.Objects, *key)
glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = nil", ctx, key) glog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = nil", ctx, key)
return nil return nil
} }
// Obj wraps the object for use in the mock. // Obj wraps the object for use in the mock.
func (m *MockBetaRegionDisks) Obj(o *beta.Disk) *MockRegionDisksObj { func (m *MockRegionDisks) Obj(o *ga.Disk) *MockRegionDisksObj {
return &MockRegionDisksObj{o} return &MockRegionDisksObj{o}
} }
// Resize is a mock for the corresponding method. // Resize is a mock for the corresponding method.
func (m *MockBetaRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *beta.RegionDisksResizeRequest) error { func (m *MockRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error {
if m.ResizeHook != nil { if m.ResizeHook != nil {
return m.ResizeHook(ctx, key, arg0, m) return m.ResizeHook(ctx, key, arg0, m)
} }
return nil return nil
} }
// GCEBetaRegionDisks is a simplifying adapter for the GCE RegionDisks. // GCERegionDisks is a simplifying adapter for the GCE RegionDisks.
type GCEBetaRegionDisks struct { type GCERegionDisks struct {
s *Service s *Service
} }
// Get the Disk named by key. // Get the Disk named by key.
func (g *GCEBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) { func (g *GCERegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) {
glog.V(5).Infof("GCEBetaRegionDisks.Get(%v, %v): called", ctx, key) glog.V(5).Infof("GCERegionDisks.Get(%v, %v): called", ctx, key)
if !key.Valid() { if !key.Valid() {
glog.V(2).Infof("GCEBetaRegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) glog.V(2).Infof("GCERegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
return nil, fmt.Errorf("invalid GCE key (%#v)", key) return nil, fmt.Errorf("invalid GCE key (%#v)", key)
} }
projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks")
rk := &RateLimitKey{ rk := &RateLimitKey{
ProjectID: projectID, ProjectID: projectID,
Operation: "Get", Operation: "Get",
Version: meta.Version("beta"), Version: meta.Version("ga"),
Service: "RegionDisks", Service: "RegionDisks",
} }
glog.V(5).Infof("GCEBetaRegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) glog.V(5).Infof("GCERegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEBetaRegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) glog.V(4).Infof("GCERegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
return nil, err return nil, err
} }
call := g.s.Beta.RegionDisks.Get(projectID, key.Region, key.Name) call := g.s.GA.RegionDisks.Get(projectID, key.Region, key.Name)
call.Context(ctx) call.Context(ctx)
v, err := call.Do() v, err := call.Do()
glog.V(4).Infof("GCEBetaRegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) glog.V(4).Infof("GCERegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err)
return v, err return v, err
} }
// List all Disk objects. // List all Disk objects.
func (g *GCEBetaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) { func (g *GCERegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) {
glog.V(5).Infof("GCEBetaRegionDisks.List(%v, %v, %v) called", ctx, region, fl) glog.V(5).Infof("GCERegionDisks.List(%v, %v, %v) called", ctx, region, fl)
projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks")
rk := &RateLimitKey{ rk := &RateLimitKey{
ProjectID: projectID, ProjectID: projectID,
Operation: "List", Operation: "List",
Version: meta.Version("beta"), Version: meta.Version("ga"),
Service: "RegionDisks", Service: "RegionDisks",
} }
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
return nil, err return nil, err
} }
glog.V(5).Infof("GCEBetaRegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) glog.V(5).Infof("GCERegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
call := g.s.Beta.RegionDisks.List(projectID, region) call := g.s.GA.RegionDisks.List(projectID, region)
if fl != filter.None { if fl != filter.None {
call.Filter(fl.String()) call.Filter(fl.String())
} }
var all []*beta.Disk var all []*ga.Disk
f := func(l *beta.DiskList) error { f := func(l *ga.DiskList) error {
glog.V(5).Infof("GCEBetaRegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l) glog.V(5).Infof("GCERegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l)
all = append(all, l.Items...) all = append(all, l.Items...)
return nil return nil
} }
if err := call.Pages(ctx, f); err != nil { if err := call.Pages(ctx, f); err != nil {
glog.V(4).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) glog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
return nil, err return nil, err
} }
if glog.V(4) { if glog.V(4) {
glog.V(4).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) glog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
} else if glog.V(5) { } else if glog.V(5) {
var asStr []string var asStr []string
for _, o := range all { for _, o := range all {
asStr = append(asStr, fmt.Sprintf("%+v", o)) asStr = append(asStr, fmt.Sprintf("%+v", o))
} }
glog.V(5).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) glog.V(5).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
} }
return all, nil return all, nil
} }
// Insert Disk with key of value obj. // Insert Disk with key of value obj.
func (g *GCEBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error { func (g *GCERegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error {
glog.V(5).Infof("GCEBetaRegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj) glog.V(5).Infof("GCERegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj)
if !key.Valid() { if !key.Valid() {
glog.V(2).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) glog.V(2).Infof("GCERegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
return fmt.Errorf("invalid GCE key (%+v)", key) return fmt.Errorf("invalid GCE key (%+v)", key)
} }
projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks")
rk := &RateLimitKey{ rk := &RateLimitKey{
ProjectID: projectID, ProjectID: projectID,
Operation: "Insert", Operation: "Insert",
Version: meta.Version("beta"), Version: meta.Version("ga"),
Service: "RegionDisks", Service: "RegionDisks",
} }
glog.V(5).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) glog.V(5).Infof("GCERegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) glog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
return err return err
} }
obj.Name = key.Name obj.Name = key.Name
call := g.s.Beta.RegionDisks.Insert(projectID, key.Region, obj) call := g.s.GA.RegionDisks.Insert(projectID, key.Region, obj)
call.Context(ctx) call.Context(ctx)
op, err := call.Do() op, err := call.Do()
if err != nil { if err != nil {
glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) glog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err)
return err return err
} }
err = g.s.WaitForCompletion(ctx, op) err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) glog.V(4).Infof("GCERegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
return err return err
} }
// Delete the Disk referenced by key. // Delete the Disk referenced by key.
func (g *GCEBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error { func (g *GCERegionDisks) Delete(ctx context.Context, key *meta.Key) error {
glog.V(5).Infof("GCEBetaRegionDisks.Delete(%v, %v): called", ctx, key) glog.V(5).Infof("GCERegionDisks.Delete(%v, %v): called", ctx, key)
if !key.Valid() { if !key.Valid() {
glog.V(2).Infof("GCEBetaRegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) glog.V(2).Infof("GCERegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
return fmt.Errorf("invalid GCE key (%+v)", key) return fmt.Errorf("invalid GCE key (%+v)", key)
} }
projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks")
rk := &RateLimitKey{ rk := &RateLimitKey{
ProjectID: projectID, ProjectID: projectID,
Operation: "Delete", Operation: "Delete",
Version: meta.Version("beta"), Version: meta.Version("ga"),
Service: "RegionDisks", Service: "RegionDisks",
} }
glog.V(5).Infof("GCEBetaRegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) glog.V(5).Infof("GCERegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) glog.V(4).Infof("GCERegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
return err return err
} }
call := g.s.Beta.RegionDisks.Delete(projectID, key.Region, key.Name) call := g.s.GA.RegionDisks.Delete(projectID, key.Region, key.Name)
call.Context(ctx) call.Context(ctx)
op, err := call.Do() op, err := call.Do()
if err != nil { if err != nil {
glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) glog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err)
return err return err
} }
err = g.s.WaitForCompletion(ctx, op) err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) glog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err)
return err return err
} }
// Resize is a method on GCEBetaRegionDisks. // Resize is a method on GCERegionDisks.
func (g *GCEBetaRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *beta.RegionDisksResizeRequest) error { func (g *GCERegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error {
glog.V(5).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): called", ctx, key) glog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): called", ctx, key)
if !key.Valid() { if !key.Valid() {
glog.V(2).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) glog.V(2).Infof("GCERegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
return fmt.Errorf("invalid GCE key (%+v)", key) return fmt.Errorf("invalid GCE key (%+v)", key)
} }
projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks")
rk := &RateLimitKey{ rk := &RateLimitKey{
ProjectID: projectID, ProjectID: projectID,
Operation: "Resize", Operation: "Resize",
Version: meta.Version("beta"), Version: meta.Version("ga"),
Service: "RegionDisks", Service: "RegionDisks",
} }
glog.V(5).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) glog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) glog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
return err return err
} }
call := g.s.Beta.RegionDisks.Resize(projectID, key.Region, key.Name, arg0) call := g.s.GA.RegionDisks.Resize(projectID, key.Region, key.Name, arg0)
call.Context(ctx) call.Context(ctx)
op, err := call.Do() op, err := call.Do()
if err != nil { if err != nil {
glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) glog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err)
return err return err
} }
err = g.s.WaitForCompletion(ctx, op) err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) glog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err)
return err return err
} }

View File

@ -1351,58 +1351,58 @@ func TestRegionDisksGroup(t *testing.T) {
mock := NewMockGCE(pr) mock := NewMockGCE(pr)
var key *meta.Key var key *meta.Key
keyBeta := meta.RegionalKey("key-beta", "location") keyGA := meta.RegionalKey("key-ga", "location")
key = keyBeta key = keyGA
// Ignore unused variables. // Ignore unused variables.
_, _, _ = ctx, mock, key _, _, _ = ctx, mock, key
// Get not found. // Get not found.
if _, err := mock.BetaRegionDisks().Get(ctx, key); err == nil { if _, err := mock.RegionDisks().Get(ctx, key); err == nil {
t.Errorf("BetaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key) t.Errorf("RegionDisks().Get(%v, %v) = _, nil; want error", ctx, key)
} }
// Insert. // Insert.
{ {
obj := &beta.Disk{} obj := &ga.Disk{}
if err := mock.BetaRegionDisks().Insert(ctx, keyBeta, obj); err != nil { if err := mock.RegionDisks().Insert(ctx, keyGA, obj); err != nil {
t.Errorf("BetaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, keyBeta, obj, err) t.Errorf("RegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, keyGA, obj, err)
} }
} }
// Get across versions. // Get across versions.
if obj, err := mock.BetaRegionDisks().Get(ctx, key); err != nil { if obj, err := mock.RegionDisks().Get(ctx, key); err != nil {
t.Errorf("BetaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) t.Errorf("RegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
} }
// List. // List.
mock.MockBetaRegionDisks.Objects[*keyBeta] = mock.MockBetaRegionDisks.Obj(&beta.Disk{Name: keyBeta.Name}) mock.MockRegionDisks.Objects[*keyGA] = mock.MockRegionDisks.Obj(&ga.Disk{Name: keyGA.Name})
want := map[string]bool{ want := map[string]bool{
"key-beta": true, "key-ga": true,
} }
_ = want // ignore unused variables. _ = want // ignore unused variables.
{ {
objs, err := mock.BetaRegionDisks().List(ctx, location, filter.None) objs, err := mock.RegionDisks().List(ctx, location, filter.None)
if err != nil { if err != nil {
t.Errorf("BetaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) t.Errorf("RegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
} else { } else {
got := map[string]bool{} got := map[string]bool{}
for _, obj := range objs { for _, obj := range objs {
got[obj.Name] = true got[obj.Name] = true
} }
if !reflect.DeepEqual(got, want) { if !reflect.DeepEqual(got, want) {
t.Errorf("BetaRegionDisks().List(); got %+v, want %+v", got, want) t.Errorf("RegionDisks().List(); got %+v, want %+v", got, want)
} }
} }
} }
// Delete across versions. // Delete across versions.
if err := mock.BetaRegionDisks().Delete(ctx, keyBeta); err != nil { if err := mock.RegionDisks().Delete(ctx, keyGA); err != nil {
t.Errorf("BetaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, keyBeta, err) t.Errorf("RegionDisks().Delete(%v, %v) = %v; want nil", ctx, keyGA, err)
} }
// Delete not found. // Delete not found.
if err := mock.BetaRegionDisks().Delete(ctx, keyBeta); err == nil { if err := mock.RegionDisks().Delete(ctx, keyGA); err == nil {
t.Errorf("BetaRegionDisks().Delete(%v, %v) = nil; want error", ctx, keyBeta) t.Errorf("RegionDisks().Delete(%v, %v) = nil; want error", ctx, keyGA)
} }
} }

View File

@ -169,9 +169,9 @@ var AllServices = []*ServiceInfo{
Object: "Disk", Object: "Disk",
Service: "RegionDisks", Service: "RegionDisks",
Resource: "disks", Resource: "disks",
version: VersionBeta, version: VersionGA,
keyType: Regional, keyType: Regional,
serviceType: reflect.TypeOf(&beta.RegionDisksService{}), serviceType: reflect.TypeOf(&ga.RegionDisksService{}),
additionalMethods: []string{ additionalMethods: []string{
"Resize", "Resize",
}, },

View File

@ -34,7 +34,6 @@ import (
volumeutil "k8s.io/kubernetes/pkg/volume/util" volumeutil "k8s.io/kubernetes/pkg/volume/util"
"github.com/golang/glog" "github.com/golang/glog"
computebeta "google.golang.org/api/compute/v0.beta"
compute "google.golang.org/api/compute/v1" compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -123,7 +122,7 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider(
diskType string, diskType string,
zone string) error { zone string) error {
diskTypeURI, err := manager.getDiskTypeURI( diskTypeURI, err := manager.getDiskTypeURI(
manager.gce.region /* diskRegion */, singleZone{zone}, diskType, false /* useBetaAPI */) manager.gce.region /* diskRegion */, singleZone{zone}, diskType)
if err != nil { if err != nil {
return err return err
} }
@ -152,7 +151,7 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
} }
diskTypeURI, err := manager.getDiskTypeURI( diskTypeURI, err := manager.getDiskTypeURI(
manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType, true /* useBetaAPI */) manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType)
if err != nil { if err != nil {
return err return err
} }
@ -162,7 +161,7 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true)) fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true))
} }
diskToCreateBeta := &computebeta.Disk{ diskToCreate := &compute.Disk{
Name: name, Name: name,
SizeGb: sizeGb, SizeGb: sizeGb,
Description: tagsStr, Description: tagsStr,
@ -172,7 +171,7 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
ctx, cancel := cloud.ContextWithCallTimeout() ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel() defer cancel()
return manager.gce.c.BetaRegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreateBeta) return manager.gce.c.RegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreate)
} }
func (manager *gceServiceManager) AttachDiskOnCloudProvider( func (manager *gceServiceManager) AttachDiskOnCloudProvider(
@ -254,7 +253,7 @@ func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider(
ctx, cancel := cloud.ContextWithCallTimeout() ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel() defer cancel()
diskBeta, err := manager.gce.c.BetaRegionDisks().Get(ctx, meta.RegionalKey(diskName, manager.gce.region)) diskBeta, err := manager.gce.c.RegionDisks().Get(ctx, meta.RegionalKey(diskName, manager.gce.region))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -291,7 +290,7 @@ func (manager *gceServiceManager) DeleteRegionalDiskOnCloudProvider(
ctx, cancel := cloud.ContextWithCallTimeout() ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel() defer cancel()
return manager.gce.c.BetaRegionDisks().Delete(ctx, meta.RegionalKey(diskName, manager.gce.region)) return manager.gce.c.RegionDisks().Delete(ctx, meta.RegionalKey(diskName, manager.gce.region))
} }
func (manager *gceServiceManager) getDiskSourceURI(disk *Disk) (string, error) { func (manager *gceServiceManager) getDiskSourceURI(disk *Disk) (string, error) {
@ -329,14 +328,9 @@ func (manager *gceServiceManager) getDiskSourceURI(disk *Disk) (string, error) {
} }
func (manager *gceServiceManager) getDiskTypeURI( func (manager *gceServiceManager) getDiskTypeURI(
diskRegion string, diskZoneInfo zoneType, diskType string, useBetaAPI bool) (string, error) { diskRegion string, diskZoneInfo zoneType, diskType string) (string, error) {
var getProjectsAPIEndpoint string getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint()
if useBetaAPI {
getProjectsAPIEndpoint = manager.getProjectsAPIEndpointBeta()
} else {
getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint()
}
switch zoneInfo := diskZoneInfo.(type) { switch zoneInfo := diskZoneInfo.(type) {
case singleZone: case singleZone:
@ -428,13 +422,13 @@ func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *Disk,
return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk) return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
} }
resizeServiceRequest := &computebeta.RegionDisksResizeRequest{ resizeServiceRequest := &compute.RegionDisksResizeRequest{
SizeGb: sizeGb, SizeGb: sizeGb,
} }
ctx, cancel := cloud.ContextWithCallTimeout() ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel() defer cancel()
return manager.gce.c.BetaRegionDisks().Resize(ctx, meta.RegionalKey(disk.Name, disk.Region), resizeServiceRequest) return manager.gce.c.RegionDisks().Resize(ctx, meta.RegionalKey(disk.Name, disk.Region), resizeServiceRequest)
} }
// Disks is interface for manipulation with GCE PDs. // Disks is interface for manipulation with GCE PDs.

View File

@ -110,7 +110,7 @@ func TestCreateRegionalDisk_Basic(t *testing.T) {
tags := make(map[string]string) tags := make(map[string]string)
tags["test-tag"] = "test-value" tags["test-tag"] = "test-value"
expectedDiskTypeURI := gceComputeAPIEndpointBeta + "projects/" + fmt.Sprintf( expectedDiskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(
diskTypeURITemplateRegional, gceProjectID, gceRegion, diskType) diskTypeURITemplateRegional, gceProjectID, gceRegion, diskType)
expectedDescription := "{\"test-tag\":\"test-value\"}" expectedDescription := "{\"test-tag\":\"test-value\"}"
@ -723,9 +723,9 @@ func (manager *FakeServiceManager) CreateRegionalDiskOnCloudProvider(
tagsStr string, tagsStr string,
diskType string, diskType string,
zones sets.String) error { zones sets.String) error {
manager.createDiskCalled = true
diskTypeURI := gceComputeAPIEndpointBeta + "projects/" + fmt.Sprintf(diskTypeURITemplateRegional, manager.gceProjectID, manager.gceRegion, diskType)
manager.createDiskCalled = true
diskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(diskTypeURITemplateRegional, manager.gceProjectID, manager.gceRegion, diskType)
switch t := manager.targetAPI; t { switch t := manager.targetAPI; t {
case targetStable: case targetStable:
diskToCreateV1 := &compute.Disk{ diskToCreateV1 := &compute.Disk{
@ -737,10 +737,6 @@ func (manager *FakeServiceManager) CreateRegionalDiskOnCloudProvider(
manager.diskToCreateStable = diskToCreateV1 manager.diskToCreateStable = diskToCreateV1
manager.regionalDisks[diskToCreateV1.Name] = zones manager.regionalDisks[diskToCreateV1.Name] = zones
return nil return nil
case targetBeta:
return fmt.Errorf("regionalDisk CreateDisk op not supported in beta")
case targetAlpha:
return fmt.Errorf("regionalDisk CreateDisk op not supported in alpha")
default: default:
return fmt.Errorf("unexpected type: %T", t) return fmt.Errorf("unexpected type: %T", t)
} }

View File

@ -276,7 +276,7 @@ const (
CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation" CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation"
// owner: @verult // owner: @verult
// beta: v1.10 // GA: v1.13
// //
// Enables the regional PD feature on GCE. // Enables the regional PD feature on GCE.
GCERegionalPersistentDisk utilfeature.Feature = "GCERegionalPersistentDisk" GCERegionalPersistentDisk utilfeature.Feature = "GCERegionalPersistentDisk"
@ -429,7 +429,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
TokenRequest: {Default: true, PreRelease: utilfeature.Beta}, TokenRequest: {Default: true, PreRelease: utilfeature.Beta},
TokenRequestProjection: {Default: true, PreRelease: utilfeature.Beta}, TokenRequestProjection: {Default: true, PreRelease: utilfeature.Beta},
CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta}, CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta},
GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta}, GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.GA},
RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha},
VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, VolumeSubpath: {Default: true, PreRelease: utilfeature.GA},
BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha},