Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #31678 from anguslees/instances-list-die-die-die (commit 818f6193fb)

Automatic merge from submit-queue

providers: Remove long-deprecated Instances.List()
@@ -119,8 +119,6 @@ type Instances interface {
     InstanceID(nodeName types.NodeName) (string, error)
     // InstanceType returns the type of the specified instance.
     InstanceType(name types.NodeName) (string, error)
-    // List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn)
-    List(filter string) ([]types.NodeName, error)
     // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
     // expected format for the key is standard ssh-keygen format: <protocol> <blob>
     AddSSHKeyToAllInstances(user string, keyData []byte) error
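The comment on the removed List declares its contract: the filter is a regular expression that must match the entire instance name (FQDN). As a rough illustration of that contract only (filterNodeNames and the sample names below are hypothetical, not code from this tree), a full-name match can be enforced by anchoring the pattern before compiling it. The AWS implementation removed in a later hunk delegated to a similar regex helper, getInstancesByRegex, which this change leaves in place.

package main

import (
	"fmt"
	"regexp"
)

// filterNodeNames mimics the documented contract of the removed List: the
// pattern is a regular expression that must match the entire name, so it is
// wrapped in a non-capturing group and anchored with ^ and $ before compiling.
func filterNodeNames(pattern string, names []string) ([]string, error) {
	re, err := regexp.Compile("^(?:" + pattern + ")$")
	if err != nil {
		return nil, err
	}
	matched := []string{}
	for _, name := range names {
		if re.MatchString(name) {
			matched = append(matched, name)
		}
	}
	return matched, nil
}

func main() {
	names := []string{"node-a.example.internal", "node-b.example.internal"}
	// A bare "node-a" would not match the full FQDN; "node-a.*" does.
	out, err := filterNodeNames("node-a.*", names)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // [node-a.example.internal]
}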
@@ -1030,12 +1030,6 @@ func (c *Cloud) getInstancesByRegex(regex string) ([]types.NodeName, error) {
     return matchingInstances, nil
 }
 
-// List is an implementation of Instances.List.
-func (c *Cloud) List(filter string) ([]types.NodeName, error) {
-    // TODO: Should really use tag query. No need to go regexp.
-    return c.getInstancesByRegex(filter)
-}
-
 // getAllZones retrieves a list of all the zones in which nodes are running
 // It currently involves querying all instances
 func (c *Cloud) getAllZones() (sets.String, error) {
@@ -538,92 +538,6 @@ func mockAvailabilityZone(availabilityZone string) *Cloud {
     return awsCloud
 }
 
-func TestList(t *testing.T) {
-    // TODO this setup is not very clean and could probably be improved
-    var instance0 ec2.Instance
-    var instance1 ec2.Instance
-    var instance2 ec2.Instance
-    var instance3 ec2.Instance
-
-    //0
-    tag0 := ec2.Tag{
-        Key:   aws.String("Name"),
-        Value: aws.String("foo"),
-    }
-    instance0.Tags = []*ec2.Tag{&tag0}
-    instance0.InstanceId = aws.String("instance0")
-    instance0.PrivateDnsName = aws.String("instance0.ec2.internal")
-    instance0.Placement = &ec2.Placement{AvailabilityZone: aws.String("us-east-1a")}
-    state0 := ec2.InstanceState{
-        Name: aws.String("running"),
-    }
-    instance0.State = &state0
-
-    //1
-    tag1 := ec2.Tag{
-        Key:   aws.String("Name"),
-        Value: aws.String("bar"),
-    }
-    instance1.Tags = []*ec2.Tag{&tag1}
-    instance1.InstanceId = aws.String("instance1")
-    instance1.PrivateDnsName = aws.String("instance1.ec2.internal")
-    instance1.Placement = &ec2.Placement{AvailabilityZone: aws.String("us-east-1a")}
-    state1 := ec2.InstanceState{
-        Name: aws.String("running"),
-    }
-    instance1.State = &state1
-
-    //2
-    tag2 := ec2.Tag{
-        Key:   aws.String("Name"),
-        Value: aws.String("baz"),
-    }
-    instance2.Tags = []*ec2.Tag{&tag2}
-    instance2.InstanceId = aws.String("instance2")
-    instance2.PrivateDnsName = aws.String("instance2.ec2.internal")
-    instance2.Placement = &ec2.Placement{AvailabilityZone: aws.String("us-east-1a")}
-    state2 := ec2.InstanceState{
-        Name: aws.String("running"),
-    }
-    instance2.State = &state2
-
-    //3
-    tag3 := ec2.Tag{
-        Key:   aws.String("Name"),
-        Value: aws.String("quux"),
-    }
-    instance3.Tags = []*ec2.Tag{&tag3}
-    instance3.InstanceId = aws.String("instance3")
-    instance3.PrivateDnsName = aws.String("instance3.ec2.internal")
-    instance3.Placement = &ec2.Placement{AvailabilityZone: aws.String("us-east-1a")}
-    state3 := ec2.InstanceState{
-        Name: aws.String("running"),
-    }
-    instance3.State = &state3
-
-    instances := []*ec2.Instance{&instance0, &instance1, &instance2, &instance3}
-    aws, _ := mockInstancesResp(&instance0, instances)
-
-    table := []struct {
-        input  string
-        expect []types.NodeName
-    }{
-        {"blahonga", []types.NodeName{}},
-        {"quux", []types.NodeName{"instance3.ec2.internal"}},
-        {"a", []types.NodeName{"instance1.ec2.internal", "instance2.ec2.internal"}},
-    }
-
-    for _, item := range table {
-        result, err := aws.List(item.input)
-        if err != nil {
-            t.Errorf("Expected call with %v to succeed, failed with %v", item.input, err)
-        }
-        if e, a := item.expect, result; !reflect.DeepEqual(e, a) {
-            t.Errorf("Expected %v, got %v", e, a)
-        }
-    }
-}
-
 func testHasNodeAddress(t *testing.T, addrs []v1.NodeAddress, addressType v1.NodeAddressType, address string) {
     for _, addr := range addrs {
         if addr.Type == addressType && addr.Address == address {
@@ -71,26 +71,6 @@ func (az *Cloud) InstanceType(name types.NodeName) (string, error) {
     return string(machine.HardwareProfile.VMSize), nil
 }
 
-// List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn)
-func (az *Cloud) List(filter string) ([]types.NodeName, error) {
-    allNodes, err := az.listAllNodesInResourceGroup()
-    if err != nil {
-        return nil, err
-    }
-
-    filteredNodes, err := filterNodes(allNodes, filter)
-    if err != nil {
-        return nil, err
-    }
-
-    nodeNames := make([]types.NodeName, len(filteredNodes))
-    for i, v := range filteredNodes {
-        nodeNames[i] = types.NodeName(*v.Name)
-    }
-
-    return nodeNames, nil
-}
-
 // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
 // expected format for the key is standard ssh-keygen format: <protocol> <blob>
 func (az *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
@@ -2277,37 +2277,6 @@ func (gce *GCECloud) InstanceType(nodeName types.NodeName) (string, error) {
     return instance.Type, nil
 }
 
-// List is an implementation of Instances.List.
-func (gce *GCECloud) List(filter string) ([]types.NodeName, error) {
-    var instances []types.NodeName
-    // TODO: Parallelize, although O(zones) so not too bad (N <= 3 typically)
-    for _, zone := range gce.managedZones {
-        pageToken := ""
-        page := 0
-        for ; page == 0 || (pageToken != "" && page < maxPages); page++ {
-            listCall := gce.service.Instances.List(gce.projectID, zone)
-            if len(filter) > 0 {
-                listCall = listCall.Filter("name eq " + filter)
-            }
-            if pageToken != "" {
-                listCall = listCall.PageToken(pageToken)
-            }
-            res, err := listCall.Do()
-            if err != nil {
-                return nil, err
-            }
-            pageToken = res.NextPageToken
-            for _, instance := range res.Items {
-                instances = append(instances, mapInstanceToNodeName(instance))
-            }
-        }
-        if page >= maxPages {
-            glog.Errorf("List exceeded maxPages=%d for Instances.List: truncating.", maxPages)
-        }
-    }
-    return instances, nil
-}
-
 // GetAllZones returns all the zones in which nodes are running
 func (gce *GCECloud) GetAllZones() (sets.String, error) {
     // Fast-path for non-multizone
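The GCE implementation removed above pages through results with a NextPageToken and gives up after maxPages, logging that the listing was truncated. Below is a minimal, generic sketch of that pagination pattern; page, listPage, and the maxPages value are stand-ins invented for the example, not the GCE client API. Capping the page count trades completeness for a guarantee that a misbehaving API cannot keep the loop spinning forever.

package main

import (
	"fmt"
	"log"
)

// maxPages mirrors the guard in the removed GCE code: stop paging after this
// many pages rather than looping forever. The value here is arbitrary.
const maxPages = 25

// page is a hypothetical response shape: a batch of items plus the token for
// the next batch (an empty token means there are no more pages).
type page struct {
	Items         []string
	NextPageToken string
}

// listPage stands in for one paginated API call; it fakes two pages of data.
func listPage(token string) (page, error) {
	if token == "" {
		return page{Items: []string{"inst-a", "inst-b"}, NextPageToken: "t1"}, nil
	}
	return page{Items: []string{"inst-c"}}, nil
}

func listAll() ([]string, error) {
	var all []string
	pageToken := ""
	n := 0
	for ; n == 0 || (pageToken != "" && n < maxPages); n++ {
		res, err := listPage(pageToken)
		if err != nil {
			return nil, err
		}
		all = append(all, res.Items...)
		pageToken = res.NextPageToken
	}
	if n >= maxPages {
		// Same idea as the removed code: report truncation instead of spinning.
		log.Printf("listAll exceeded maxPages=%d: truncating", maxPages)
	}
	return all, nil
}

func main() {
	items, err := listAll()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(items) // [inst-a inst-b inst-c]
}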
@@ -272,50 +272,6 @@ func TestNewOpenStack(t *testing.T) {
     }
 }
 
-func TestInstances(t *testing.T) {
-    cfg, ok := configFromEnv()
-    if !ok {
-        t.Skipf("No config found in environment")
-    }
-
-    os, err := newOpenStack(cfg)
-    if err != nil {
-        t.Fatalf("Failed to construct/authenticate OpenStack: %s", err)
-    }
-
-    i, ok := os.Instances()
-    if !ok {
-        t.Fatalf("Instances() returned false")
-    }
-
-    srvs, err := i.List(".")
-    if err != nil {
-        t.Fatalf("Instances.List() failed: %s", err)
-    }
-    if len(srvs) == 0 {
-        t.Fatalf("Instances.List() returned zero servers")
-    }
-    t.Logf("Found servers (%d): %s\n", len(srvs), srvs)
-
-    srvExternalId, err := i.ExternalID(srvs[0])
-    if err != nil {
-        t.Fatalf("Instances.ExternalId(%s) failed: %s", srvs[0], err)
-    }
-    t.Logf("Found server (%s), with external id: %s\n", srvs[0], srvExternalId)
-
-    srvInstanceId, err := i.InstanceID(srvs[0])
-    if err != nil {
-        t.Fatalf("Instance.InstanceId(%s) failed: %s", srvs[0], err)
-    }
-    t.Logf("Found server (%s), with instance id: %s\n", srvs[0], srvInstanceId)
-
-    addrs, err := i.NodeAddresses(srvs[0])
-    if err != nil {
-        t.Fatalf("Instances.NodeAddresses(%s) failed: %s", srvs[0], err)
-    }
-    t.Logf("Found NodeAddresses(%s) = %s\n", srvs[0], addrs)
-}
-
 func TestLoadBalancer(t *testing.T) {
     cfg, ok := configFromEnv()
     if !ok {
@@ -287,19 +287,6 @@ func (m *OVirtInstanceMap) ListSortedNames() []string {
     return names
 }
 
-// List enumerates the set of nodes instances known by the cloud provider
-func (v *OVirtCloud) List(filter string) ([]types.NodeName, error) {
-    instances, err := v.fetchAllInstances()
-    if err != nil {
-        return nil, err
-    }
-    var nodeNames []types.NodeName
-    for _, s := range instances.ListSortedNames() {
-        nodeNames = append(nodeNames, types.NodeName(s))
-    }
-    return nodeNames, nil
-}
-
 // Implementation of Instances.CurrentNodeName
 func (v *OVirtCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
     return types.NodeName(hostname), nil
@@ -231,35 +231,6 @@ func (os *Rackspace) Instances() (cloudprovider.Instances, bool) {
     return &Instances{compute}, true
 }
 
-func (i *Instances) List(name_filter string) ([]types.NodeName, error) {
-    glog.V(2).Infof("rackspace List(%v) called", name_filter)
-
-    opts := osservers.ListOpts{
-        Name:   name_filter,
-        Status: "ACTIVE",
-    }
-    pager := servers.List(i.compute, opts)
-
-    ret := make([]types.NodeName, 0)
-    err := pager.EachPage(func(page pagination.Page) (bool, error) {
-        sList, err := servers.ExtractServers(page)
-        if err != nil {
-            return false, err
-        }
-        for i := range sList {
-            ret = append(ret, mapServerToNodeName(&sList[i]))
-        }
-        return true, nil
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    glog.V(2).Infof("Found %v entries: %v", len(ret), ret)
-
-    return ret, nil
-}
-
 func serverHasAddress(srv osservers.Server, ip string) bool {
     if ip == firstAddr(srv.Addresses["private"]) {
         return true
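The Rackspace implementation removed above accumulates results through gophercloud's pager, whose EachPage callback returns (bool, error) to decide whether to fetch the next page. The sketch below shows the same callback-accumulation shape using a simplified, hypothetical Pager type rather than the real gophercloud pagination package.

package main

import "fmt"

// Pager is a simplified, hypothetical pager: it feeds pages of results to a
// callback until the callback returns false or an error, mirroring the shape
// of the pagination used in the removed Rackspace code.
type Pager struct {
	pages [][]string
}

// EachPage calls handler once per page; returning (false, nil) stops early.
func (p Pager) EachPage(handler func(page []string) (bool, error)) error {
	for _, pg := range p.pages {
		cont, err := handler(pg)
		if err != nil {
			return err
		}
		if !cont {
			return nil
		}
	}
	return nil
}

func main() {
	pager := Pager{pages: [][]string{{"srv-1", "srv-2"}, {"srv-3"}}}

	// Accumulate names across pages, much as the removed List did with
	// servers.ExtractServers and mapServerToNodeName.
	names := make([]string, 0)
	err := pager.EachPage(func(page []string) (bool, error) {
		names = append(names, page...)
		return true, nil // keep paging
	})
	if err != nil {
		fmt.Println("pager error:", err)
		return
	}
	fmt.Println(names) // [srv-1 srv-2 srv-3]
}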
@@ -140,38 +140,6 @@ func TestNewRackspace(t *testing.T) {
     }
 }
 
-func TestInstances(t *testing.T) {
-    cfg, ok := configFromEnv()
-    if !ok {
-        t.Skipf("No config found in environment")
-    }
-
-    os, err := newRackspace(cfg)
-    if err != nil {
-        t.Fatalf("Failed to construct/authenticate Rackspace: %s", err)
-    }
-
-    i, ok := os.Instances()
-    if !ok {
-        t.Fatalf("Instances() returned false")
-    }
-
-    srvs, err := i.List(".")
-    if err != nil {
-        t.Fatalf("Instances.List() failed: %s", err)
-    }
-    if len(srvs) == 0 {
-        t.Fatalf("Instances.List() returned zero servers")
-    }
-    t.Logf("Found servers (%d): %s\n", len(srvs), srvs)
-
-    addrs, err := i.NodeAddresses(srvs[0])
-    if err != nil {
-        t.Fatalf("Instances.NodeAddresses(%s) failed: %s", srvs[0], err)
-    }
-    t.Logf("Found NodeAddresses(%s) = %s\n", srvs[0], addrs)
-}
-
 func TestZones(t *testing.T) {
     os := Rackspace{
         provider: &gophercloud.ProviderClient{
@@ -450,11 +450,11 @@ func (vs *VSphere) Instances() (cloudprovider.Instances, bool) {
 }
 
 // List returns names of VMs (inside vm folder) by applying filter and which are currently running.
-func (i *Instances) List(filter string) ([]k8stypes.NodeName, error) {
+func (vs *VSphere) list(filter string) ([]k8stypes.NodeName, error) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
 
-    vmList, err := getInstances(ctx, i.cfg, i.client, filter)
+    vmList, err := getInstances(ctx, vs.cfg, vs.client, filter)
     if err != nil {
         return nil, err
     }
@@ -156,13 +156,13 @@ func TestInstances(t *testing.T) {
         t.Fatalf("Instances() returned false")
     }
 
-    srvs, err := i.List("*")
+    srvs, err := vs.list("*")
     if err != nil {
-        t.Fatalf("Instances.List() failed: %s", err)
+        t.Fatalf("list() failed: %s", err)
     }
 
     if len(srvs) == 0 {
-        t.Fatalf("Instances.List() returned zero servers")
+        t.Fatalf("list() returned zero servers")
     }
     t.Logf("Found servers (%d): %s\n", len(srvs), srvs)
 
@@ -215,17 +215,12 @@ func TestVolumes(t *testing.T) {
         t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
     }
 
-    i, ok := vs.Instances()
-    if !ok {
-        t.Fatalf("Instances() returned false")
-    }
-
-    srvs, err := i.List("*")
+    srvs, err := vs.list("*")
     if err != nil {
-        t.Fatalf("Instances.List() failed: %s", err)
+        t.Fatalf("list() failed: %s", err)
     }
     if len(srvs) == 0 {
-        t.Fatalf("Instances.List() returned zero servers")
+        t.Fatalf("list() returned zero servers")
     }
 
     volumeOptions := &VolumeOptions{