mirror of https://github.com/k3s-io/kubernetes.git
Remove the exponential backoff in NodeGetInfo
This commit is contained in:
parent 9a1ea1844e
commit f48e5de963
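
This change drops the client-side retry loop around the CSI NodeGetInfo RPC. Previously, NodeGetInfo wrapped nodeGetInfoV1 in wait.ExponentialBackoff, retrying until the driver returned a non-empty node ID and logging (but otherwise discarding) intermediate errors; when the loop exhausted its steps, the caller saw only the generic wait.ErrWaitTimeout instead of the real gRPC error. As the removed TODO notes, retries should happen at a lower layer (issue #73371). For reference, a minimal runnable sketch of the wait.ExponentialBackoff semantics being removed; the Duration/Factor/Steps values here are placeholders, since the real initialDuration, factor, and steps constants are defined elsewhere in the package:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        // Placeholder values; the real initialDuration, factor, and steps
        // constants are defined elsewhere in pkg/volume/csi.
        backoff := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Steps: 4}

        attempts := 0
        err := wait.ExponentialBackoff(backoff, func() (bool, error) {
            attempts++
            // Returning (false, nil) means "not done, try again". After Steps
            // attempts the loop gives up with wait.ErrWaitTimeout, which is how
            // the removed code could mask the underlying NodeGetInfo error.
            return false, nil
        })
        fmt.Println(attempts, err == wait.ErrWaitTimeout) // prints: 4 true
    }
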
pkg/volume/csi/csi_client.go

@@ -31,7 +31,6 @@ import (
 	"google.golang.org/grpc/status"
 	api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
@@ -152,22 +151,12 @@ func (c *csiDriverClient) NodeGetInfo(ctx context.Context) (
 	err error) {
 	klog.V(4).Info(log("calling NodeGetInfo rpc"))
 
-	// TODO retries should happen at a lower layer (issue #73371)
-	backoff := wait.Backoff{Duration: initialDuration, Factor: factor, Steps: steps}
-	err = wait.ExponentialBackoff(backoff, func() (bool, error) {
-		var getNodeInfoError error
-		nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV1(ctx)
-		if nodeID != "" {
-			return true, nil
-		}
-		if getNodeInfoError != nil {
-			klog.Warningf("Error calling CSI NodeGetInfo(): %v", getNodeInfoError.Error())
-		}
-		// Continue with exponential backoff
-		return false, nil
-	})
-
-	return nodeID, maxVolumePerNode, accessibleTopology, err
+	var getNodeInfoError error
+	nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV1(ctx)
+	if getNodeInfoError != nil {
+		klog.Warningf("Error calling CSI NodeGetInfo(): %v", getNodeInfoError.Error())
+	}
+	return nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError
 }
 
 func (c *csiDriverClient) nodeGetInfoV1(ctx context.Context) (
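
With the backoff removed, NodeGetInfo performs a single RPC and propagates the real error instead of a masking wait.ErrWaitTimeout. A caller that still needs resilience against a slow-starting driver can retry at its own layer, as issue #73371 suggests. A hypothetical sketch of such a wrapper; the nodeInfoGetter interface, the function name, and the backoff values are illustrative and not part of this commit:

    package retryexample

    import (
        "context"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // nodeInfoGetter is a hypothetical stand-in for csiDriverClient.
    type nodeInfoGetter interface {
        NodeGetInfo(ctx context.Context) (nodeID string, maxVolumePerNode int64,
            accessibleTopology map[string]string, err error)
    }

    // nodeGetInfoWithRetry shows how a caller could reinstate retries now
    // that the client no longer loops internally. Values are illustrative.
    func nodeGetInfoWithRetry(ctx context.Context, c nodeInfoGetter) (string, error) {
        var nodeID string
        backoff := wait.Backoff{Duration: time.Second, Factor: 2.0, Steps: 4}
        err := wait.ExponentialBackoff(backoff, func() (bool, error) {
            id, _, _, err := c.NodeGetInfo(ctx)
            if err != nil || id == "" {
                return false, nil // retry transient failures and empty node IDs
            }
            nodeID = id
            return true, nil
        })
        return nodeID, err
    }
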
pkg/volume/csi/csi_client_test.go

@@ -26,7 +26,6 @@ import (
 	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
 	api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/csi/fake"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
@@ -314,7 +313,6 @@ func TestClientNodeGetInfo(t *testing.T) {
 		expectedMaxVolumePerNode   int64
 		expectedAccessibleTopology map[string]string
 		mustFail                   bool
-		mustTimeout                bool
 		err                        error
 	}{
 		{
@@ -328,13 +326,6 @@ func TestClientNodeGetInfo(t *testing.T) {
 			mustFail:                   true,
 			err:                        errors.New("grpc error"),
 		},
-		{
-			name:                       "test empty nodeId",
-			mustTimeout:                true,
-			expectedNodeID:             "",
-			expectedMaxVolumePerNode:   16,
-			expectedAccessibleTopology: map[string]string{"com.example.csi-topology/zone": "zone1"},
-		},
 	}
 
 	for _, tc := range testCases {
@@ -358,13 +349,7 @@ func TestClientNodeGetInfo(t *testing.T) {
 		}
 
 		nodeID, maxVolumePerNode, accessibleTopology, err := client.NodeGetInfo(context.Background())
-		if tc.mustTimeout {
-			if wait.ErrWaitTimeout.Error() != err.Error() {
-				t.Errorf("should have timed out : %s", tc.name)
-			}
-		} else {
-			checkErr(t, tc.mustFail, err)
-		}
+		checkErr(t, tc.mustFail, err)
 
 		if nodeID != tc.expectedNodeID {
 			t.Errorf("expected nodeID: %v; got: %v", tc.expectedNodeID, nodeID)
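
On the test side, the mustTimeout flag and the "test empty nodeId" case are deleted: without the backoff loop there is no code path that returns wait.ErrWaitTimeout, so every case now funnels through checkErr. A minimal sketch of what a checkErr-style helper typically looks like (an assumption about its shape; the real helper is defined elsewhere in csi_client_test.go):

    package csi

    import "testing"

    // checkErr is an assumed sketch of the test helper used above: it fails
    // the test when the presence of an error does not match the expectation.
    // The real helper is defined elsewhere in csi_client_test.go.
    func checkErr(t *testing.T, mustFail bool, err error) {
        t.Helper()
        if mustFail && err == nil {
            t.Error("expected an error, got none")
        }
        if !mustFail && err != nil {
            t.Errorf("expected no error, got: %v", err)
        }
    }
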