Merge pull request #12729 from cjcullen/gce

Clean up GCE metadata calls. Remove GetNodeResources from all providers.
Marek Grabowski 2015-08-17 10:48:48 +02:00
commit 48184026f1
13 changed files with 5 additions and 495 deletions
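The GCE side of this change drops the hand-rolled getMetadata helper (and the two hard-coded metadata URLs) in favor of the metadata package the file already uses in getProjectAndZone. A minimal sketch of the two call styles for the internal-IP lookup; the import path for the metadata package is not visible in these hunks, so google.golang.org/cloud/compute/metadata below is an assumption:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	// Assumed import path; the hunks below only show the package name.
	"google.golang.org/cloud/compute/metadata"
)

// Old style (removed in this PR): build the request by hand against the
// link-local metadata address and set the metadata header explicitly.
func internalIPByHand() (string, error) {
	req, err := http.NewRequest("GET",
		"http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/ip", nil)
	if err != nil {
		return "", err
	}
	req.Header.Add("X-Google-Metadata-Request", "True")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", err
	}
	return string(data), nil
}

// New style (what the PR adopts): pass the path relative to
// computeMetadata/v1/ and let the library handle endpoint and headers.
func internalIPViaLibrary() (string, error) {
	return metadata.Get("instance/network-interfaces/0/ip")
}

func main() {
	ip, err := internalIPViaLibrary()
	fmt.Println(ip, err) // only meaningful when run on a GCE instance
}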

@@ -107,8 +107,6 @@ type Instances interface {
InstanceID(name string) (string, error)
// List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn)
List(filter string) ([]string, error)
// GetNodeResources gets the resources for a particular node
GetNodeResources(name string) (*api.NodeResources, error)
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
AddSSHKeyToAllInstances(user string, keyData []byte) error

@@ -39,7 +39,6 @@ import (
"github.com/aws/aws-sdk-go/service/elb"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider"
"github.com/golang/glog"
@@ -772,183 +771,6 @@ func (aws *AWSCloud) List(filter string) ([]string, error) {
return aws.getInstancesByRegex(filter)
}
// GetNodeResources implements Instances.GetNodeResources
func (aws *AWSCloud) GetNodeResources(name string) (*api.NodeResources, error) {
instance, err := aws.getInstanceByNodeName(name)
if err != nil {
return nil, err
}
resources, err := getResourcesByInstanceType(orEmpty(instance.InstanceType))
if err != nil {
return nil, err
}
return resources, nil
}
// Builds an api.NodeResources
// cpu is in ecus, memory is in GiB
// We pass the family in so that we could provide more info (e.g. GPU or not)
func makeNodeResources(family string, cpu float64, memory float64) (*api.NodeResources, error) {
return &api.NodeResources{
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(int64(cpu*1000), resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(int64(memory*1024*1024*1024), resource.BinarySI),
},
}, nil
}
// Maps an EC2 instance type to k8s resource information
func getResourcesByInstanceType(instanceType string) (*api.NodeResources, error) {
// There is no API for this (that I know of)
switch instanceType {
// t1: Burstable
// TODO: The ECUs are fake values (because they are burstable), so this is just a guess...
case "t1.micro":
return makeNodeResources("t1", 0.125, 0.615)
// t2: Burstable
// TODO: The ECUs are fake values (because they are burstable), so this is just a guess...
case "t2.micro":
return makeNodeResources("t2", 0.25, 1)
case "t2.small":
return makeNodeResources("t2", 0.5, 2)
case "t2.medium":
return makeNodeResources("t2", 1, 4)
// c1: Compute optimized
case "c1.medium":
return makeNodeResources("c1", 5, 1.7)
case "c1.xlarge":
return makeNodeResources("c1", 20, 7)
// cc2: Compute optimized
case "cc2.8xlarge":
return makeNodeResources("cc2", 88, 60.5)
// cg1: GPU instances
case "cg1.4xlarge":
return makeNodeResources("cg1", 33.5, 22.5)
// cr1: Memory optimized
case "cr1.8xlarge":
return makeNodeResources("cr1", 88, 244)
// c3: Compute optimized
case "c3.large":
return makeNodeResources("c3", 7, 3.75)
case "c3.xlarge":
return makeNodeResources("c3", 14, 7.5)
case "c3.2xlarge":
return makeNodeResources("c3", 28, 15)
case "c3.4xlarge":
return makeNodeResources("c3", 55, 30)
case "c3.8xlarge":
return makeNodeResources("c3", 108, 60)
// c4: Compute optimized
case "c4.large":
return makeNodeResources("c4", 8, 3.75)
case "c4.xlarge":
return makeNodeResources("c4", 16, 7.5)
case "c4.2xlarge":
return makeNodeResources("c4", 31, 15)
case "c4.4xlarge":
return makeNodeResources("c4", 62, 30)
case "c4.8xlarge":
return makeNodeResources("c4", 132, 60)
// g2: GPU instances
case "g2.2xlarge":
return makeNodeResources("g2", 26, 15)
// hi1: Storage optimized (SSD)
case "hi1.4xlarge":
return makeNodeResources("hs1", 35, 60.5)
// hs1: Storage optimized (HDD)
case "hs1.8xlarge":
return makeNodeResources("hs1", 35, 117)
// d2: Dense instances (next-gen of hs1)
case "d2.xlarge":
return makeNodeResources("d2", 14, 30.5)
case "d2.2xlarge":
return makeNodeResources("d2", 28, 61)
case "d2.4xlarge":
return makeNodeResources("d2", 56, 122)
case "d2.8xlarge":
return makeNodeResources("d2", 116, 244)
// m1: General purpose
case "m1.small":
return makeNodeResources("m1", 1, 1.7)
case "m1.medium":
return makeNodeResources("m1", 2, 3.75)
case "m1.large":
return makeNodeResources("m1", 4, 7.5)
case "m1.xlarge":
return makeNodeResources("m1", 8, 15)
// m2: Memory optimized
case "m2.xlarge":
return makeNodeResources("m2", 6.5, 17.1)
case "m2.2xlarge":
return makeNodeResources("m2", 13, 34.2)
case "m2.4xlarge":
return makeNodeResources("m2", 26, 68.4)
// m3: General purpose
case "m3.medium":
return makeNodeResources("m3", 3, 3.75)
case "m3.large":
return makeNodeResources("m3", 6.5, 7.5)
case "m3.xlarge":
return makeNodeResources("m3", 13, 15)
case "m3.2xlarge":
return makeNodeResources("m3", 26, 30)
// m4: General purpose
case "m4.large":
return makeNodeResources("m4", 6.5, 8)
case "m4.xlarge":
return makeNodeResources("m4", 13, 16)
case "m4.2xlarge":
return makeNodeResources("m4", 26, 32)
case "m4.4xlarge":
return makeNodeResources("m4", 53.5, 64)
case "m4.10xlarge":
return makeNodeResources("m4", 124.5, 160)
// i2: Storage optimized (SSD)
case "i2.xlarge":
return makeNodeResources("i2", 14, 30.5)
case "i2.2xlarge":
return makeNodeResources("i2", 27, 61)
case "i2.4xlarge":
return makeNodeResources("i2", 53, 122)
case "i2.8xlarge":
return makeNodeResources("i2", 104, 244)
// r3: Memory optimized
case "r3.large":
return makeNodeResources("r3", 6.5, 15)
case "r3.xlarge":
return makeNodeResources("r3", 13, 30.5)
case "r3.2xlarge":
return makeNodeResources("r3", 26, 61)
case "r3.4xlarge":
return makeNodeResources("r3", 52, 122)
case "r3.8xlarge":
return makeNodeResources("r3", 104, 244)
default:
glog.Errorf("unknown instanceType: %s", instanceType)
return nil, nil
}
}
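For reference, the deleted helper above turned these hard-coded ECU/GiB figures into an api.NodeResources capacity list: ECUs became millicores and GiB became bytes. A minimal sketch of that conversion outside the cloud provider, using m3.medium (3 ECU, 3.75 GiB) as the worked example to match the expectation in the deleted TestGetResources further down; nodeResources is a local stand-in name, not the provider's API:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

// Same conversion the deleted makeNodeResources performed:
// ECUs become millicores (cpu*1000), GiB becomes bytes (memory*2^30).
func nodeResources(cpu, memory float64) *api.NodeResources {
	return &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(int64(cpu*1000), resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(int64(memory*1024*1024*1024), resource.BinarySI),
		},
	}
}

func main() {
	// m3.medium in the table above: 3 ECU and 3.75 GiB,
	// i.e. 3000 millicores and 3.75*2^30 = 4026531840 bytes.
	r := nodeResources(3, 3.75)
	cpu := r.Capacity[api.ResourceCPU]
	mem := r.Capacity[api.ResourceMemory]
	fmt.Println(cpu.String(), mem.String())
}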
// GetZone implements Zones.GetZone
func (self *AWSCloud) GetZone() (cloudprovider.Zone, error) {
if self.availabilityZone == "" {

@@ -29,7 +29,6 @@ import (
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
)
const TestClusterId = "clusterid.test"
@@ -601,76 +600,3 @@ func TestGetRegion(t *testing.T) {
t.Errorf("Unexpected FailureDomain: %s", zone.FailureDomain)
}
}
func TestGetResources(t *testing.T) {
var instance0 ec2.Instance
var instance1 ec2.Instance
var instance2 ec2.Instance
//0
instance0.InstanceID = aws.String("m3.medium")
instance0.PrivateDNSName = aws.String("m3-medium.ec2.internal")
instance0.InstanceType = aws.String("m3.medium")
state0 := ec2.InstanceState{
Name: aws.String("running"),
}
instance0.State = &state0
//1
instance1.InstanceID = aws.String("r3.8xlarge")
instance1.PrivateDNSName = aws.String("r3-8xlarge.ec2.internal")
instance1.InstanceType = aws.String("r3.8xlarge")
state1 := ec2.InstanceState{
Name: aws.String("running"),
}
instance1.State = &state1
//2
instance2.InstanceID = aws.String("unknown.type")
instance2.PrivateDNSName = aws.String("unknown-type.ec2.internal")
instance2.InstanceType = aws.String("unknown.type")
state2 := ec2.InstanceState{
Name: aws.String("running"),
}
instance2.State = &state2
instances := []*ec2.Instance{&instance0, &instance1, &instance2}
aws1 := mockInstancesResp(instances)
res1, err1 := aws1.GetNodeResources("m3-medium.ec2.internal")
if err1 != nil {
t.Errorf("Should not error when instance type found: %v", err1)
}
e1 := &api.NodeResources{
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(int64(3.0*1000), resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(int64(3.75*1024*1024*1024), resource.BinarySI),
},
}
if !reflect.DeepEqual(e1, res1) {
t.Errorf("Expected %v, got %v", e1, res1)
}
res2, err2 := aws1.GetNodeResources("r3-8xlarge.ec2.internal")
if err2 != nil {
t.Errorf("Should not error when instance type found: %v", err2)
}
e2 := &api.NodeResources{
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(int64(104.0*1000), resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(int64(244.0*1024*1024*1024), resource.BinarySI),
},
}
if !reflect.DeepEqual(e2, res2) {
t.Errorf("Expected %v, got %v", e2, res2)
}
res3, err3 := aws1.GetNodeResources("unknown-type.ec2.internal")
if err3 != nil {
t.Errorf("Should not error when unknown instance type")
}
if res3 != nil {
t.Errorf("Should return nil resources when unknown instance type")
}
}

@@ -198,11 +198,6 @@ func (f *FakeCloud) GetZone() (cloudprovider.Zone, error) {
return f.Zone, f.Err
}
func (f *FakeCloud) GetNodeResources(name string) (*api.NodeResources, error) {
f.addCall("get-node-resources")
return f.NodeResources, f.Err
}
func (f *FakeCloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
f.Lock.Lock()
defer f.Lock.Unlock()

@@ -19,7 +19,6 @@ package gce_cloud
import (
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"path"
@@ -28,7 +27,6 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/wait"
@@ -44,9 +42,7 @@ import (
)
const (
ProviderName = "gce"
EXTERNAL_IP_METADATA_URL = "http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip"
INTERNAL_IP_METADATA_URL = "http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/ip"
ProviderName = "gce"
)
const k8sNodeRouteTag = "k8s-node-route"
@@ -60,9 +56,6 @@ type GCECloud struct {
instanceID string
externalID string
networkURL string
// Used for accessing the metadata server
metadataAccess func(string) (string, error)
}
type Config struct {
@@ -77,25 +70,6 @@ func init() {
cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { return newGCECloud(config) })
}
func getMetadata(url string) (string, error) {
client := http.Client{}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
}
req.Header.Add("X-Google-Metadata-Request", "True")
res, err := client.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
data, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
return string(data), nil
}
func getProjectAndZone() (string, string, error) {
result, err := metadata.Get("instance/zone")
if err != nil {
@@ -205,7 +179,6 @@ func newGCECloud(config io.Reader) (*GCECloud, error) {
instanceID: instanceID,
externalID: externalID,
networkURL: networkURL,
metadataAccess: getMetadata,
}, nil
}
@@ -640,11 +613,11 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error
// NodeAddresses is an implementation of Instances.NodeAddresses.
func (gce *GCECloud) NodeAddresses(_ string) ([]api.NodeAddress, error) {
internalIP, err := gce.metadataAccess(INTERNAL_IP_METADATA_URL)
internalIP, err := metadata.Get("instance/network-interfaces/0/ip")
if err != nil {
return nil, fmt.Errorf("couldn't get internal IP: %v", err)
}
externalIP, err := gce.metadataAccess(EXTERNAL_IP_METADATA_URL)
externalIP, err := metadata.Get("instance/network-interfaces/0/access-configs/0/external-ip")
if err != nil {
return nil, fmt.Errorf("couldn't get external IP: %v", err)
}
@@ -693,50 +666,6 @@ func (gce *GCECloud) List(filter string) ([]string, error) {
return instances, nil
}
// cpu is in cores, memory is in GiB
func makeResources(cpu float64, memory float64) *api.NodeResources {
return &api.NodeResources{
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(int64(cpu*1000), resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(int64(memory*1024*1024*1024), resource.BinarySI),
},
}
}
func canonicalizeMachineType(machineType string) string {
ix := strings.LastIndex(machineType, "/")
return machineType[ix+1:]
}
func (gce *GCECloud) GetNodeResources(name string) (*api.NodeResources, error) {
instance := canonicalizeInstanceName(name)
instanceCall := gce.service.Instances.Get(gce.projectID, gce.zone, instance)
res, err := instanceCall.Do()
if err != nil {
return nil, err
}
// TODO: actually read machine size instead of this awful hack.
switch canonicalizeMachineType(res.MachineType) {
case "f1-micro":
return makeResources(1, 0.6), nil
case "g1-small":
return makeResources(1, 1.70), nil
case "n1-standard-1":
return makeResources(1, 3.75), nil
case "n1-standard-2":
return makeResources(2, 7.5), nil
case "n1-standard-4":
return makeResources(4, 15), nil
case "n1-standard-8":
return makeResources(8, 30), nil
case "n1-standard-16":
return makeResources(16, 30), nil
default:
glog.Errorf("unknown machine: %s", res.MachineType)
return nil, nil
}
}
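The machineType that the GCE instances API returns is a full resource URL, which is why the deleted code stripped it down to the last path segment before the switch. A small sketch of that trimming; the URL is a hypothetical example, not taken from the diff:

package main

import (
	"fmt"
	"strings"
)

// Same logic as the deleted canonicalizeMachineType: keep only the last
// path segment of the machineType URL.
func canonicalizeMachineType(machineType string) string {
	ix := strings.LastIndex(machineType, "/")
	return machineType[ix+1:]
}

func main() {
	// Hypothetical machineType URL as returned by the GCE instances API.
	u := "https://www.googleapis.com/compute/v1/projects/example-project/zones/us-central1-a/machineTypes/n1-standard-4"
	fmt.Println(canonicalizeMachineType(u)) // n1-standard-4
}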
func getMetadataValue(metadata *compute.Metadata, key string) (string, bool) {
for _, item := range metadata.Items {
if item.Key == key {

@@ -217,29 +217,6 @@ func (c *MesosCloud) List(filter string) ([]string, error) {
return addr, err
}
// GetNodeResources gets the resources for a particular node
func (c *MesosCloud) GetNodeResources(name string) (*api.NodeResources, error) {
//TODO(jdef) use a timeout here? 15s?
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
nodes, err := c.client.listSlaves(ctx)
if err != nil {
return nil, err
}
if len(nodes) == 0 {
log.V(2).Info("no slaves found, are any running?")
} else {
for _, node := range nodes {
if name == node.hostname {
return node.resources, nil
}
}
}
log.Warningf("failed to locate node spec for %q", name)
return nil, nil
}
// NodeAddresses returns the addresses of the specified instance.
func (c *MesosCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
ip, err := ipAddress(name)

@@ -24,7 +24,6 @@ import (
"time"
log "github.com/golang/glog"
"speter.net/go/exp/math/dec/inf"
)
func TestIPAddress(t *testing.T) {
@@ -253,35 +252,3 @@ func Test_List(t *testing.T) {
t.Fatalf("List with a reject-all filter should return a list of size 0: (actual: %#v)", clusters)
}
}
// test mesos.GetNodeResources
func Test_GetNodeResources(t *testing.T) {
defer log.Flush()
md := FakeMasterDetector{}
httpServer, httpClient, httpTransport := makeHttpMocks()
defer httpServer.Close()
cacheTTL := 500 * time.Millisecond
mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL)
mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()}
resources, err := mesosCloud.GetNodeResources("mesos1.internal.company.com")
if err != nil {
t.Fatalf("GetNodeResources does not yield an error: %#v", err)
}
expectedCpu := inf.NewDec(8, 0)
expectedMem := inf.NewDec(15360, 0)
actualCpu := resources.Capacity["cpu"].Amount
actualMem := resources.Capacity["memory"].Amount
if actualCpu.Cmp(expectedCpu) != 0 {
t.Fatalf("GetNodeResources should return the expected amount of cpu: (expected: %#v, vactual: %#v)", expectedCpu, actualCpu)
}
if actualMem.Cmp(expectedMem) != 0 {
t.Fatalf("GetNodeResources should return the expected amount of memory: (expected: %#v, vactual: %#v)", expectedMem, actualMem)
}
}

@@ -386,32 +386,6 @@ func (i *Instances) InstanceID(name string) (string, error) {
return "/" + srv.ID, nil
}
func (i *Instances) GetNodeResources(name string) (*api.NodeResources, error) {
glog.V(4).Infof("GetNodeResources(%v) called", name)
srv, err := getServerByName(i.compute, name)
if err != nil {
return nil, err
}
s, ok := srv.Flavor["id"]
if !ok {
return nil, ErrAttrNotFound
}
flavId, ok := s.(string)
if !ok {
return nil, ErrAttrNotFound
}
rsrc, ok := i.flavor_to_resource[flavId]
if !ok {
return nil, ErrNotFound
}
glog.V(4).Infof("GetNodeResources(%v) => %v", name, rsrc)
return rsrc, nil
}
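Both the OpenStack and Rackspace versions of the deleted method pulled the flavor id out of an untyped Flavor map with two comma-ok checks before consulting the flavor_to_resource cache. A standalone sketch of that lookup pattern; the server struct, error value, and flavor id below are stubs for illustration, not the provider's real types:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the provider's sentinel error and the gophercloud server
// struct; only the Flavor field matters for this sketch.
var errAttrNotFound = errors.New("expected attribute not found")

type server struct {
	Flavor map[string]interface{}
}

// Same two-step comma-ok lookup the deleted GetNodeResources used:
// first check the key exists, then check the value is really a string.
func flavorID(srv server) (string, error) {
	s, ok := srv.Flavor["id"]
	if !ok {
		return "", errAttrNotFound
	}
	id, ok := s.(string)
	if !ok {
		return "", errAttrNotFound
	}
	return id, nil
}

func main() {
	// "general1-2" is a hypothetical flavor id.
	id, err := flavorID(server{Flavor: map[string]interface{}{"id": "general1-2"}})
	fmt.Println(id, err)
}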
func (os *OpenStack) Clusters() (cloudprovider.Clusters, bool) {
return nil, false
}

@@ -149,12 +149,6 @@ func TestInstances(t *testing.T) {
t.Fatalf("Instances.NodeAddresses(%s) failed: %s", srvs[0], err)
}
t.Logf("Found NodeAddresses(%s) = %s\n", srvs[0], addrs)
rsrcs, err := i.GetNodeResources(srvs[0])
if err != nil {
t.Fatalf("Instances.GetNodeResources(%s) failed: %s", srvs[0], err)
}
t.Logf("Found GetNodeResources(%s) = %s\n", srvs[0], rsrcs)
}
func TestTCPLoadBalancer(t *testing.T) {

@@ -271,10 +271,6 @@ func (v *OVirtCloud) List(filter string) ([]string, error) {
return instances.ListSortedNames(), nil
}
func (v *OVirtCloud) GetNodeResources(name string) (*api.NodeResources, error) {
return nil, nil
}
// Implementation of Instances.CurrentNodeName
func (v *OVirtCloud) CurrentNodeName(hostname string) (string, error) {
return hostname, nil

@@ -29,12 +29,10 @@ import (
os_servers "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
"github.com/rackspace/gophercloud/pagination"
"github.com/rackspace/gophercloud/rackspace"
"github.com/rackspace/gophercloud/rackspace/compute/v2/flavors"
"github.com/rackspace/gophercloud/rackspace/compute/v2/servers"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider"
)
@@ -141,8 +139,7 @@ func newRackspace(cfg Config) (*Rackspace, error) {
}
type Instances struct {
compute *gophercloud.ServiceClient
flavor_to_resource map[string]*api.NodeResources // keyed by flavor id
compute *gophercloud.ServiceClient
}
// Instances returns an implementation of Instances for Rackspace.
@@ -156,38 +153,9 @@ func (os *Rackspace) Instances() (cloudprovider.Instances, bool) {
glog.Warningf("Failed to find compute endpoint: %v", err)
return nil, false
}
pager := flavors.ListDetail(compute, nil)
flavor_to_resource := make(map[string]*api.NodeResources)
err = pager.EachPage(func(page pagination.Page) (bool, error) {
flavorList, err := flavors.ExtractFlavors(page)
if err != nil {
return false, err
}
for _, flavor := range flavorList {
rsrc := api.NodeResources{
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(int64(flavor.VCPUs*1000), resource.DecimalSI),
api.ResourceMemory: resource.MustParse(fmt.Sprintf("%dMi", flavor.RAM)),
"openstack.org/disk": resource.MustParse(fmt.Sprintf("%dG", flavor.Disk)),
"openstack.org/rxTxFactor": *resource.NewQuantity(int64(flavor.RxTxFactor*1000), resource.DecimalSI),
"openstack.org/swap": resource.MustParse(fmt.Sprintf("%dMi", flavor.Swap)),
},
}
flavor_to_resource[flavor.ID] = &rsrc
}
return true, nil
})
if err != nil {
glog.Warningf("Failed to find compute flavors: %v", err)
return nil, false
}
glog.V(2).Infof("Found %v compute flavors", len(flavor_to_resource))
glog.V(1).Info("Claiming to support Instances")
return &Instances{compute, flavor_to_resource}, true
return &Instances{compute}, true
}
func (i *Instances) List(name_filter string) ([]string, error) {
@@ -385,32 +353,6 @@ func (i *Instances) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
}
func (i *Instances) GetNodeResources(name string) (*api.NodeResources, error) {
glog.V(2).Infof("GetNodeResources(%v) called", name)
srv, err := getServerByName(i.compute, name)
if err != nil {
return nil, err
}
s, ok := srv.Flavor["id"]
if !ok {
return nil, ErrAttrNotFound
}
flavId, ok := s.(string)
if !ok {
return nil, ErrAttrNotFound
}
rsrc, ok := i.flavor_to_resource[flavId]
if !ok {
return nil, ErrNotFound
}
glog.V(2).Infof("GetNodeResources(%v) => %v", name, rsrc)
return rsrc, nil
}
func (os *Rackspace) Clusters() (cloudprovider.Clusters, bool) {
return nil, false
}

@@ -149,12 +149,6 @@ func TestInstances(t *testing.T) {
t.Fatalf("Instances.NodeAddresses(%s) failed: %s", srvs[0], err)
}
t.Logf("Found NodeAddresses(%s) = %s\n", srvs[0], addrs)
rsrcs, err := i.GetNodeResources(srvs[0])
if err != nil {
t.Fatalf("Instances.GetNodeResources(%s) failed: %s", srvs[0], err)
}
t.Logf("Found GetNodeResources(%s) = %s\n", srvs[0], rsrcs)
}
func TestZones(t *testing.T) {

@@ -265,7 +265,3 @@ func (v *VagrantCloud) List(filter string) ([]string, error) {
return instances, nil
}
func (v *VagrantCloud) GetNodeResources(name string) (*api.NodeResources, error) {
return nil, nil
}