Deprecate the following flags for node-controller:

  --node-milli-cpu
  --node-memory
  --machines
  --minion-regexp
  --sync-nodes

Remove the following flags from the standalone kubernetes binary:

  --node-milli-cpu
  --node-memory
Robert Bailey 2015-05-22 15:14:48 -07:00
parent 4ca2595ed3
commit d0bcf953e9
5 changed files with 29 additions and 76 deletions
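
The deprecations below rely on pflag's MarkDeprecated (the controller manager's flag set is a *pflag.FlagSet), which keeps a flag parseable but hides it from --help and prints a warning when it is used. A minimal standalone sketch of that behavior — the flag set name and argument values here are illustrative, not taken from this commit:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("controller-manager", pflag.ExitOnError)

	// The flag stays registered, so existing invocations continue to parse.
	machines := fs.String("machines", "", "List of machines to schedule onto, comma separated.")

	// MarkDeprecated hides the flag from usage output and prints
	// "Flag --machines has been deprecated, <message>" when it is set.
	fs.MarkDeprecated("machines", "will be removed in a future version")

	fs.Parse([]string{"--machines=node-1,node-2"})
	fmt.Println(*machines) // node-1,node-2
}

Removal, by contrast (as done for the standalone binary's --node-milli-cpu and --node-memory), makes the flag an immediate parse error for anyone still passing it.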

View File

@@ -37,7 +37,6 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	apierrors "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
@@ -189,15 +188,9 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
 	// TODO: Write an integration test for the replication controllers watch.
 	go controllerManager.Run(3, util.NeverStop)

-	nodeResources := &api.NodeResources{
-		Capacity: api.ResourceList{
-			api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
-			api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
-		}}
-	nodeController := nodecontroller.NewNodeController(nil, "", nodeResources, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(),
+	nodeController := nodecontroller.NewNodeController(nil, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(),
 		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
-	nodeController.Run(5*time.Second, true)
+	nodeController.Run(5 * time.Second)

 	cadvisorInterface := new(cadvisor.Fake)

 	// Kubelet (localhost)

View File

@@ -26,7 +26,6 @@ import (
 	"strconv"
 	"time"

-	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
@@ -58,15 +57,11 @@ type CMServer struct {
 	CloudConfigFile         string
 	ConcurrentEndpointSyncs int
 	ConcurrentRCSyncs       int
-	MinionRegexp            string
 	NodeSyncPeriod          time.Duration
 	ResourceQuotaSyncPeriod time.Duration
 	NamespaceSyncPeriod     time.Duration
 	PVClaimBinderSyncPeriod time.Duration
 	RegisterRetryCount      int
-	MachineList             util.StringList
-	SyncNodeList            bool
-	SyncNodeStatus          bool
 	NodeMonitorGracePeriod  time.Duration
 	NodeStartupGracePeriod  time.Duration
 	NodeMonitorPeriod       time.Duration
@@ -76,10 +71,6 @@ type CMServer struct {
 	DeletingPodsBurst     int
 	ServiceAccountKeyFile string

-	// TODO: Discover these by pinging the host machines, and rip out these params.
-	NodeMilliCPU int64
-	NodeMemory   resource.Quantity

 	ClusterName       string
 	ClusterCIDR       util.IPNet
 	AllocateNodeCIDRs bool
@@ -87,6 +78,14 @@ type CMServer struct {
 	Master     string
 	Kubeconfig string
+
+	// The following fields are deprecated and unused except in flag parsing.
+	MinionRegexp   string
+	MachineList    util.StringList
+	SyncNodeList   bool
+	SyncNodeStatus bool
+	NodeMilliCPU   int64
+	NodeMemory     resource.Quantity
 }

 // NewCMServer creates a new CMServer with a default config.
@@ -102,8 +101,6 @@ func NewCMServer() *CMServer {
 		PVClaimBinderSyncPeriod: 10 * time.Second,
 		RegisterRetryCount:      10,
 		PodEvictionTimeout:      5 * time.Minute,
-		NodeMilliCPU:            1000,
-		NodeMemory:              resource.MustParse("3Gi"),
 		SyncNodeList:            true,
 		ClusterName:             "kubernetes",
 	}
@@ -119,6 +116,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
 	fs.IntVar(&s.ConcurrentRCSyncs, "concurrent_rc_syncs", s.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more reponsive replica management, but more CPU (and network) load")
 	fs.StringVar(&s.MinionRegexp, "minion-regexp", s.MinionRegexp, "If non empty, and --cloud-provider is specified, a regular expression for matching minion VMs.")
+	fs.MarkDeprecated("minion-regexp", "will be removed in a future version")
 	fs.DurationVar(&s.NodeSyncPeriod, "node-sync-period", s.NodeSyncPeriod, ""+
 		"The period for syncing nodes from cloudprovider. Longer periods will result in "+
 		"fewer calls to cloud provider, but may delay addition of new nodes to cluster.")
@@ -131,7 +129,9 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+
 		"The number of retries for initial node registration. Retry interval equals node-sync-period.")
 	fs.Var(&s.MachineList, "machines", "List of machines to schedule onto, comma separated.")
+	fs.MarkDeprecated("machines", "will be removed in a future version")
 	fs.BoolVar(&s.SyncNodeList, "sync-nodes", s.SyncNodeList, "If true, and --cloud-provider is specified, sync nodes from the cloud provider. Default true.")
+	fs.MarkDeprecated("sync-nodes", "will be removed in a future version")
 	fs.BoolVar(&s.SyncNodeStatus, "sync-node-status", s.SyncNodeStatus,
 		"DEPRECATED. Does not have any effect now and it will be removed in a later release.")
 	fs.DurationVar(&s.NodeMonitorGracePeriod, "node-monitor-grace-period", 40*time.Second,
@@ -143,10 +143,10 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.DurationVar(&s.NodeMonitorPeriod, "node-monitor-period", 5*time.Second,
 		"The period for syncing NodeStatus in NodeController.")
 	fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA key used to sign service account tokens.")
-	// TODO: Discover these by pinging the host machines, and rip out these flags.
-	// TODO: in the meantime, use resource.QuantityFlag() instead of these
 	fs.Int64Var(&s.NodeMilliCPU, "node-milli-cpu", s.NodeMilliCPU, "The amount of MilliCPU provisioned on each node")
+	fs.MarkDeprecated("node-milli-cpu", "will be removed in a future version")
 	fs.Var(resource.NewQuantityFlagValue(&s.NodeMemory), "node-memory", "The amount of memory (in bytes) provisioned on each node")
+	fs.MarkDeprecated("node-memory", "will be removed in a future version")
 	fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
 	fs.Var(&s.ClusterCIDR, "cluster-cidr", "CIDR Range for Pods in cluster.")
 	fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
@@ -154,25 +154,8 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
 }

-func (s *CMServer) verifyMinionFlags() {
-	if !s.SyncNodeList && s.MinionRegexp != "" {
-		glog.Info("--minion-regexp is ignored by --sync-nodes=false")
-	}
-	if s.CloudProvider == "" || s.MinionRegexp == "" {
-		if len(s.MachineList) == 0 {
-			glog.Info("No machines specified!")
-		}
-		return
-	}
-	if len(s.MachineList) != 0 {
-		glog.Info("--machines is overwritten by --minion-regexp")
-	}
-}

 // Run runs the CMServer. This should never exit.
 func (s *CMServer) Run(_ []string) error {
-	s.verifyMinionFlags()
 	if s.Kubeconfig == "" && s.Master == "" {
 		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
 	}
@@ -218,21 +201,15 @@ func (s *CMServer) Run(_ []string) error {
 	go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)

 	cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)

-	nodeResources := &api.NodeResources{
-		Capacity: api.ResourceList{
-			api.ResourceCPU:    *resource.NewMilliQuantity(s.NodeMilliCPU, resource.DecimalSI),
-			api.ResourceMemory: s.NodeMemory,
-		},
-	}

 	if s.SyncNodeStatus {
 		glog.Warning("DEPRECATION NOTICE: sync-node-status flag is being deprecated. It has no effect now and it will be removed in a future version.")
 	}

-	nodeController := nodecontroller.NewNodeController(cloud, s.MinionRegexp, nodeResources,
-		kubeClient, s.RegisterRetryCount, s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
+	nodeController := nodecontroller.NewNodeController(cloud, kubeClient, s.RegisterRetryCount,
+		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
-	nodeController.Run(s.NodeSyncPeriod, s.SyncNodeList)
+	nodeController.Run(s.NodeSyncPeriod)

 	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
 	if err := serviceController.Run(s.NodeSyncPeriod); err != nil {

View File

@@ -30,7 +30,6 @@ import (
 	kubeletapp "github.com/GoogleCloudPlatform/kubernetes/cmd/kubelet/app"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
@@ -53,13 +52,10 @@ import (
 )

 var (
 	addr           = flag.String("addr", "127.0.0.1", "The address to use for the apiserver.")
 	port           = flag.Int("port", 8080, "The port for the apiserver to use.")
 	dockerEndpoint = flag.String("docker-endpoint", "", "If non-empty, use this for the docker endpoint to communicate with")
 	etcdServer     = flag.String("etcd-server", "http://localhost:4001", "If non-empty, path to the set of etcd server to use")
-	// TODO: Discover these by pinging the host machines, and rip out these flags.
-	nodeMilliCPU           = flag.Int64("node-milli-cpu", 1000, "The amount of MilliCPU provisioned on each node")
-	nodeMemory             = flag.Int64("node-memory", 3*1024*1024*1024, "The amount of memory (in bytes) provisioned on each node")
 	masterServiceNamespace = flag.String("master-service-namespace", api.NamespaceDefault, "The namespace from which the kubernetes master services should be injected into pods")
 	enableProfiling        = flag.Bool("profiling", false, "Enable profiling via web interface host:port/debug/pprof/")
 	deletingPodsQps        = flag.Float32("deleting-pods-qps", 0.1, "")
@@ -123,19 +119,12 @@ func runScheduler(cl *client.Client) {
 }

 // RunControllerManager starts a controller
-func runControllerManager(cl *client.Client, nodeMilliCPU, nodeMemory int64) {
-	nodeResources := &api.NodeResources{
-		Capacity: api.ResourceList{
-			api.ResourceCPU:    *resource.NewMilliQuantity(nodeMilliCPU, resource.DecimalSI),
-			api.ResourceMemory: *resource.NewQuantity(nodeMemory, resource.BinarySI),
-		},
-	}
+func runControllerManager(cl *client.Client) {
 	const nodeSyncPeriod = 10 * time.Second
 	nodeController := nodecontroller.NewNodeController(
-		nil, "", nodeResources, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst),
+		nil, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst),
 		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
-	nodeController.Run(nodeSyncPeriod, true)
+	nodeController.Run(nodeSyncPeriod)

 	serviceController := servicecontroller.New(nil, cl, "kubernetes")
 	if err := serviceController.Run(nodeSyncPeriod); err != nil {
@@ -152,7 +141,7 @@ func runControllerManager(cl *client.Client, nodeMilliCPU, nodeMemory int64) {
 func startComponents(etcdClient tools.EtcdClient, cl *client.Client, addr net.IP, port int) {
 	runApiServer(etcdClient, addr, port, *masterServiceNamespace)
 	runScheduler(cl)
-	runControllerManager(cl, *nodeMilliCPU, *nodeMemory)
+	runControllerManager(cl)

 	dockerClient := dockertools.ConnectToDockerOrDie(*dockerEndpoint)
 	cadvisorInterface, err := cadvisor.New(0)

View File

@@ -50,8 +50,6 @@ type nodeStatusData struct {
 type NodeController struct {
 	cloud              cloudprovider.Interface
-	matchRE            string
-	staticResources    *api.NodeResources
 	kubeClient         client.Interface
 	recorder           record.EventRecorder
 	registerRetryCount int
@@ -93,8 +91,6 @@ type NodeController struct {
 // NewNodeController returns a new node controller to sync instances from cloudprovider.
 func NewNodeController(
 	cloud cloudprovider.Interface,
-	matchRE string,
-	staticResources *api.NodeResources,
 	kubeClient client.Interface,
 	registerRetryCount int,
 	podEvictionTimeout time.Duration,
@@ -117,8 +113,6 @@ func NewNodeController(
 	}
 	return &NodeController{
 		cloud:              cloud,
-		matchRE:            matchRE,
-		staticResources:    staticResources,
 		kubeClient:         kubeClient,
 		recorder:           recorder,
 		registerRetryCount: registerRetryCount,
@@ -178,7 +172,7 @@ func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) {
 }

 // Run starts an asynchronous loop that monitors the status of cluster nodes.
-func (nc *NodeController) Run(period time.Duration, syncNodeList bool) {
+func (nc *NodeController) Run(period time.Duration) {
 	// Incorporate the results of node status pushed from kubelet to master.
 	go util.Forever(func() {
 		if err := nc.monitorNodeStatus(); err != nil {

View File

@@ -324,7 +324,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
 	}

 	for _, item := range table {
-		nodeController := NewNodeController(nil, "", nil, item.fakeNodeHandler, 10,
+		nodeController := NewNodeController(nil, item.fakeNodeHandler, 10,
 			evictionTimeout, util.NewFakeRateLimiter(), testNodeMonitorGracePeriod,
 			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
 		nodeController.now = func() util.Time { return fakeNow }
@@ -527,7 +527,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
 	}

 	for _, item := range table {
-		nodeController := NewNodeController(nil, "", nil, item.fakeNodeHandler, 10, 5*time.Minute, util.NewFakeRateLimiter(),
+		nodeController := NewNodeController(nil, item.fakeNodeHandler, 10, 5*time.Minute, util.NewFakeRateLimiter(),
 			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
 		nodeController.now = func() util.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {