mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-26 21:17:23 +00:00
remove custom timeout in test that is never exercised
and misc cleanup
This commit is contained in:
parent
d2570d3971
commit
9ece24c33f
@@ -21,6 +21,7 @@ go_test(
     deps = [
         "//pkg/cloudprovider/providers/fake:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
     ],
 )
@@ -63,27 +63,27 @@ func NewSyncManager(cloud cloudprovider.Interface, nodeName types.NodeName, sync
 	}
 }
 
-func (manager *cloudResourceSyncManager) getNodeAddressSafe() ([]v1.NodeAddress, error) {
-	manager.nodeAddressesMux.Lock()
-	defer manager.nodeAddressesMux.Unlock()
+func (m *cloudResourceSyncManager) getNodeAddressSafe() ([]v1.NodeAddress, error) {
+	m.nodeAddressesMux.Lock()
+	defer m.nodeAddressesMux.Unlock()
 
-	return manager.nodeAddresses, manager.nodeAddressesErr
+	return m.nodeAddresses, m.nodeAddressesErr
 }
 
-func (manager *cloudResourceSyncManager) setNodeAddressSafe(nodeAddresses []v1.NodeAddress, err error) {
-	manager.nodeAddressesMux.Lock()
-	defer manager.nodeAddressesMux.Unlock()
+func (m *cloudResourceSyncManager) setNodeAddressSafe(nodeAddresses []v1.NodeAddress, err error) {
+	m.nodeAddressesMux.Lock()
+	defer m.nodeAddressesMux.Unlock()
 
-	manager.nodeAddresses = nodeAddresses
-	manager.nodeAddressesErr = err
+	m.nodeAddresses = nodeAddresses
+	m.nodeAddressesErr = err
 }
 
 // NodeAddresses does not wait for cloud provider to return a node addresses.
 // It always returns node addresses or an error.
-func (manager *cloudResourceSyncManager) NodeAddresses() ([]v1.NodeAddress, error) {
+func (m *cloudResourceSyncManager) NodeAddresses() ([]v1.NodeAddress, error) {
 	// wait until there is something
 	for {
-		nodeAddresses, err := manager.getNodeAddressSafe()
+		nodeAddresses, err := m.getNodeAddressSafe()
 		if len(nodeAddresses) == 0 && err == nil {
 			klog.V(5).Infof("Waiting for %v for cloud provider to provide node addresses", nodeAddressesRetryPeriod)
 			time.Sleep(nodeAddressesRetryPeriod)
@@ -93,12 +93,12 @@ func (manager *cloudResourceSyncManager) NodeAddresses() ([]v1.NodeAddress, erro
 	}
 }
 
-func (manager *cloudResourceSyncManager) collectNodeAddresses(ctx context.Context, nodeName types.NodeName) {
+func (m *cloudResourceSyncManager) collectNodeAddresses(ctx context.Context, nodeName types.NodeName) {
 	klog.V(5).Infof("Requesting node addresses from cloud provider for node %q", nodeName)
 
-	instances, ok := manager.cloud.Instances()
+	instances, ok := m.cloud.Instances()
 	if !ok {
-		manager.setNodeAddressSafe(nil, fmt.Errorf("failed to get instances from cloud provider"))
+		m.setNodeAddressSafe(nil, fmt.Errorf("failed to get instances from cloud provider"))
 		return
 	}
 
@@ -109,16 +109,17 @@ func (manager *cloudResourceSyncManager) collectNodeAddresses(ctx context.Contex
 
 	nodeAddresses, err := instances.NodeAddresses(ctx, nodeName)
 	if err != nil {
-		manager.setNodeAddressSafe(nil, fmt.Errorf("failed to get node address from cloud provider: %v", err))
+		m.setNodeAddressSafe(nil, fmt.Errorf("failed to get node address from cloud provider: %v", err))
 		klog.V(2).Infof("Node addresses from cloud provider for node %q not collected", nodeName)
 	} else {
-		manager.setNodeAddressSafe(nodeAddresses, nil)
+		m.setNodeAddressSafe(nodeAddresses, nil)
 		klog.V(5).Infof("Node addresses from cloud provider for node %q collected", nodeName)
 	}
 }
 
-func (manager *cloudResourceSyncManager) Run(stopCh <-chan struct{}) {
+// Run starts the cloud resource sync manager's sync loop.
+func (m *cloudResourceSyncManager) Run(stopCh <-chan struct{}) {
 	wait.Until(func() {
-		manager.collectNodeAddresses(context.TODO(), manager.nodeName)
-	}, manager.syncPeriod, stopCh)
+		m.collectNodeAddresses(context.TODO(), m.nodeName)
+	}, m.syncPeriod, stopCh)
 }
@@ -17,33 +17,15 @@ limitations under the License.
 package cloudresource
 
 import (
-	"fmt"
 	"reflect"
 	"testing"
 	"time"
 
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/diff"
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
 )
 
-func collectNodeAddresses(manager *cloudResourceSyncManager) ([]v1.NodeAddress, error) {
-	var nodeAddresses []v1.NodeAddress
-	var err error
-
-	collected := make(chan struct{}, 1)
-	go func() {
-		nodeAddresses, err = manager.NodeAddresses()
-		close(collected)
-	}()
-
-	select {
-	case <-collected:
-		return nodeAddresses, err
-	case <-time.Tick(2 * nodeAddressesRetryPeriod):
-		return nil, fmt.Errorf("Timeout after %v waiting for address to appear", 2*nodeAddressesRetryPeriod)
-	}
-}
-
 func createNodeInternalIPAddress(address string) []v1.NodeAddress {
 	return []v1.NodeAddress{
 		{
@@ -67,12 +49,12 @@ func TestNodeAddressesRequest(t *testing.T) {
 	manager := NewSyncManager(cloud, "defaultNode", syncPeriod).(*cloudResourceSyncManager)
 	go manager.Run(stopCh)
 
-	nodeAddresses, err := collectNodeAddresses(manager)
+	nodeAddresses, err := manager.NodeAddresses()
 	if err != nil {
 		t.Errorf("Unexpected err: %q\n", err)
 	}
 	if !reflect.DeepEqual(nodeAddresses, cloud.Addresses) {
-		t.Errorf("Unexpected list of node addresses %#v, expected %#v: %v", nodeAddresses, cloud.Addresses, err)
+		t.Errorf("Unexpected diff of node addresses: %v", diff.ObjectReflectDiff(nodeAddresses, cloud.Addresses))
 	}
 
 	// Change the IP address
@@ -80,7 +62,7 @@ func TestNodeAddressesRequest(t *testing.T) {
 
 	// Wait until the IP address changes
 	for i := 0; i < maxRetry; i++ {
-		nodeAddresses, err := collectNodeAddresses(manager)
+		nodeAddresses, err := manager.NodeAddresses()
 		t.Logf("nodeAddresses: %#v, err: %v", nodeAddresses, err)
 		if err != nil {
 			t.Errorf("Unexpected err: %q\n", err)
Loading…
Reference in New Issue
Block a user