Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 13:37:30 +00:00)
Merge pull request #87911 from tedyu/rm-exp-backoff
Remove the exponential backoff in NodeGetInfo
commit fd0b34d69d
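
For context, the retry wrapper this PR deletes is wait.ExponentialBackoff from k8s.io/apimachinery/pkg/util/wait. Below is a minimal, self-contained sketch of how that helper behaves; the Duration/Factor/Steps values are placeholders, not the initialDuration/factor/steps constants referenced in the diff.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Placeholder backoff: start at 100ms, double each step, at most 5 attempts.
	backoff := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Steps: 5}

	attempts := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		// (true, nil) stops with success, (false, nil) retries after the next
		// backoff interval, and a non-nil error aborts immediately.
		return attempts >= 3, nil
	})

	// If the condition never returns true within Steps attempts,
	// err is wait.ErrWaitTimeout; otherwise it is nil.
	fmt.Printf("attempts=%d err=%v\n", attempts, err)
}

The diff below first drops the now-unused wait import, then replaces the backoff loop around nodeGetInfoV1 with a single direct call.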
@@ -31,7 +31,6 @@ import (
 	"google.golang.org/grpc/status"
 	api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
@@ -152,22 +151,12 @@ func (c *csiDriverClient) NodeGetInfo(ctx context.Context) (
 	err error) {
 	klog.V(4).Info(log("calling NodeGetInfo rpc"))
 
-	// TODO retries should happen at a lower layer (issue #73371)
-	backoff := wait.Backoff{Duration: initialDuration, Factor: factor, Steps: steps}
-	err = wait.ExponentialBackoff(backoff, func() (bool, error) {
-		var getNodeInfoError error
-		nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV1(ctx)
-		if nodeID != "" {
-			return true, nil
-		}
-		if getNodeInfoError != nil {
-			klog.Warningf("Error calling CSI NodeGetInfo(): %v", getNodeInfoError.Error())
-		}
-		// Continue with exponential backoff
-		return false, nil
-	})
-
-	return nodeID, maxVolumePerNode, accessibleTopology, err
+	var getNodeInfoError error
+	nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV1(ctx)
+	if getNodeInfoError != nil {
+		klog.Warningf("Error calling CSI NodeGetInfo(): %v", getNodeInfoError.Error())
+	}
+	return nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError
 }
 
 func (c *csiDriverClient) nodeGetInfoV1(ctx context.Context) (
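
For readability, this is roughly what NodeGetInfo reads like once the hunk above is applied: its context and + lines joined together, shown without diff markers. The named-return parameter list above `err error) {` lies outside the hunk, so it is inferred here (types taken from the return statement and the test fields below) rather than copied verbatim from the file.

// Sketch of NodeGetInfo after the backoff removal; the parameter list is
// inferred from the surrounding code rather than copied from the tree.
func (c *csiDriverClient) NodeGetInfo(ctx context.Context) (
	nodeID string,
	maxVolumePerNode int64,
	accessibleTopology map[string]string,
	err error) {
	klog.V(4).Info(log("calling NodeGetInfo rpc"))

	var getNodeInfoError error
	nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV1(ctx)
	if getNodeInfoError != nil {
		klog.Warningf("Error calling CSI NodeGetInfo(): %v", getNodeInfoError.Error())
	}
	return nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError
}

The remaining hunks adjust the corresponding unit test, TestClientNodeGetInfo, which no longer needs the timeout-based case that exercised the backoff path.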
@@ -26,7 +26,6 @@ import (
 	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
 	api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/csi/fake"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
@@ -314,7 +313,6 @@ func TestClientNodeGetInfo(t *testing.T) {
 		expectedMaxVolumePerNode int64
 		expectedAccessibleTopology map[string]string
 		mustFail bool
-		mustTimeout bool
 		err error
 	}{
 		{
@@ -328,13 +326,6 @@ func TestClientNodeGetInfo(t *testing.T) {
 			mustFail: true,
 			err: errors.New("grpc error"),
 		},
-		{
-			name: "test empty nodeId",
-			mustTimeout: true,
-			expectedNodeID: "",
-			expectedMaxVolumePerNode: 16,
-			expectedAccessibleTopology: map[string]string{"com.example.csi-topology/zone": "zone1"},
-		},
 	}
 
 	for _, tc := range testCases {
@@ -358,13 +349,7 @@ func TestClientNodeGetInfo(t *testing.T) {
 		}
 
 		nodeID, maxVolumePerNode, accessibleTopology, err := client.NodeGetInfo(context.Background())
-		if tc.mustTimeout {
-			if wait.ErrWaitTimeout.Error() != err.Error() {
-				t.Errorf("should have timed out : %s", tc.name)
-			}
-		} else {
-			checkErr(t, tc.mustFail, err)
-		}
+		checkErr(t, tc.mustFail, err)
 
 		if nodeID != tc.expectedNodeID {
 			t.Errorf("expected nodeID: %v; got: %v", tc.expectedNodeID, nodeID)
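
With the backoff removed, an error from the driver surfaces on the first NodeGetInfo call instead of being retried until wait.ErrWaitTimeout, which is why the mustTimeout branch and the "test empty nodeId" case disappear above. The deleted TODO ("retries should happen at a lower layer", issue #73371) indicates retries belong elsewhere; a caller that still wants them can wrap NodeGetInfo itself. The sketch below is a hypothetical illustration of that pattern, not kubelet code: csiNodeInfoGetter and retryNodeGetInfo are invented names, and the backoff values are placeholders.

package retryexample

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// csiNodeInfoGetter is a made-up interface matching NodeGetInfo's shape.
type csiNodeInfoGetter interface {
	NodeGetInfo(ctx context.Context) (nodeID string, maxVolumePerNode int64,
		accessibleTopology map[string]string, err error)
}

// retryNodeGetInfo is a hypothetical caller-side retry around NodeGetInfo,
// now that the CSI client no longer retries internally.
func retryNodeGetInfo(ctx context.Context, c csiNodeInfoGetter) (string, error) {
	var nodeID string
	backoff := wait.Backoff{Duration: time.Second, Factor: 2.0, Steps: 4} // placeholder values
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		id, _, _, err := c.NodeGetInfo(ctx)
		if err != nil || id == "" {
			return false, nil // not ready yet; retry after the next interval
		}
		nodeID = id
		return true, nil
	})
	return nodeID, err
}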