Merge pull request #83736 from krzysied/kubemark_node_labels

Kubemark node labels
Authored by Kubernetes Prow Robot on 2019-10-11 05:17:47 -07:00, committed via GitHub
commit 833e8dc10b
3 changed files with 52 additions and 32 deletions

cmd/kubemark/hollow-node.go

@@ -65,6 +65,7 @@ type hollowNodeConfig struct {
 	UseRealProxier       bool
 	ProxierSyncPeriod    time.Duration
 	ProxierMinSyncPeriod time.Duration
+	NodeLabels           map[string]string
 }

 const (
@@ -87,6 +88,8 @@ func (c *hollowNodeConfig) addFlags(fs *pflag.FlagSet) {
 	fs.BoolVar(&c.UseRealProxier, "use-real-proxier", true, "Set to true if you want to use real proxier inside hollow-proxy.")
 	fs.DurationVar(&c.ProxierSyncPeriod, "proxier-sync-period", 30*time.Second, "Period that proxy rules are refreshed in hollow-proxy.")
 	fs.DurationVar(&c.ProxierMinSyncPeriod, "proxier-min-sync-period", 0, "Minimum period that proxy rules are refreshed in hollow-proxy.")
+	bindableNodeLabels := cliflag.ConfigurationMap(c.NodeLabels)
+	fs.Var(&bindableNodeLabels, "node-labels", "Additional node labels")
 }

 func (c *hollowNodeConfig) createClientConfigFromFile() (*restclient.Config, error) {
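The --node-labels flag is bound through cliflag.ConfigurationMap, a pflag.Value backed by the same map stored in hollowNodeConfig.NodeLabels. As a rough illustration (not part of this diff; the label keys and values are made up, and k8s.io/component-base is assumed to be on the module path), a standalone sketch of how such a flag value is parsed, assuming ConfigurationMap's comma-separated key=value semantics:

package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	labels := make(map[string]string)
	// Wrap the map in a ConfigurationMap, as addFlags does for c.NodeLabels.
	m := cliflag.ConfigurationMap(labels)
	// The same string a user would pass as --node-labels=... (hypothetical labels).
	if err := m.Set("example.com/hollow=true,example.com/zone=zone-a"); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(labels) // map[example.com/hollow:true example.com/zone:zone-a]
}

Because the flag wraps the config's own map, values parsed from the command line populate c.NodeLabels directly, with no extra copying step.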
@@ -104,6 +107,17 @@ func (c *hollowNodeConfig) createClientConfigFromFile() (*restclient.Config, error) {
 	return config, nil
 }

+func (c *hollowNodeConfig) createHollowKubeletOptions() *kubemark.HollowKubletOptions {
+	return &kubemark.HollowKubletOptions{
+		NodeName:            c.NodeName,
+		KubeletPort:         c.KubeletPort,
+		KubeletReadOnlyPort: c.KubeletReadOnlyPort,
+		MaxPods:             maxPods,
+		PodsPerCore:         podsPerCore,
+		NodeLabels:          c.NodeLabels,
+	}
+}
+
 func main() {
 	rand.Seed(time.Now().UnixNano())
@@ -125,7 +139,9 @@ func main() {
 // newControllerManagerCommand creates a *cobra.Command object with default parameters
 func newHollowNodeCommand() *cobra.Command {
-	s := &hollowNodeConfig{}
+	s := &hollowNodeConfig{
+		NodeLabels: make(map[string]string),
+	}

 	cmd := &cobra.Command{
 		Use: "kubemark",
@@ -160,7 +176,7 @@ func run(config *hollowNodeConfig) {
 	}

 	if config.Morph == "kubelet" {
-		f, c := kubemark.GetHollowKubeletConfig(config.NodeName, config.KubeletPort, config.KubeletReadOnlyPort, maxPods, podsPerCore)
+		f, c := kubemark.GetHollowKubeletConfig(config.createHollowKubeletOptions())
 		heartbeatClientConfig := *clientConfig
 		heartbeatClientConfig.Timeout = c.NodeStatusUpdateFrequency.Duration
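Taken together, the hunks in this file add a --node-labels flag to the hollow node and route its value through hollowNodeConfig.NodeLabels and createHollowKubeletOptions into GetHollowKubeletConfig; an invocation could look like --node-labels=example.com/hollow=true (a made-up label, shown only for illustration).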

pkg/kubemark/hollow_kubelet.go

@@ -134,15 +134,19 @@ func (hk *HollowKubelet) Run() {
 	select {}
 }

+// HollowKubletOptions contains settable parameters for hollow kubelet.
+type HollowKubletOptions struct {
+	NodeName            string
+	KubeletPort         int
+	KubeletReadOnlyPort int
+	MaxPods             int
+	PodsPerCore         int
+	NodeLabels          map[string]string
+}
+
 // Builds a KubeletConfiguration for the HollowKubelet, ensuring that the
 // usual defaults are applied for fields we do not override.
-func GetHollowKubeletConfig(
-	nodeName string,
-	kubeletPort int,
-	kubeletReadOnlyPort int,
-	maxPods int,
-	podsPerCore int) (*options.KubeletFlags, *kubeletconfig.KubeletConfiguration) {
+func GetHollowKubeletConfig(opt *HollowKubletOptions) (*options.KubeletFlags, *kubeletconfig.KubeletConfiguration) {
 	testRootDir := utils.MakeTempDirOrDie("hollow-kubelet.", "")
 	podFilePath := utils.MakeTempDirOrDie("static-pods", testRootDir)
 	klog.Infof("Using %s as root dir for hollow-kubelet", testRootDir)
@@ -151,13 +155,13 @@ func GetHollowKubeletConfig(
 	f := options.NewKubeletFlags()
 	f.EnableServer = true
 	f.RootDirectory = testRootDir
-	f.HostnameOverride = nodeName
+	f.HostnameOverride = opt.NodeName
 	f.MinimumGCAge = metav1.Duration{Duration: 1 * time.Minute}
 	f.MaxContainerCount = 100
 	f.MaxPerPodContainerCount = 2
 	f.RegisterNode = true
 	f.RegisterSchedulable = true
-	f.ProviderID = fmt.Sprintf("kubemark://%v", nodeName)
+	f.ProviderID = fmt.Sprintf("kubemark://%v", opt.NodeName)

 	// Config struct
 	c, err := options.NewKubeletConfiguration()
@@ -167,8 +171,8 @@
 	c.StaticPodURL = ""
 	c.Address = "0.0.0.0" /* bind address */
-	c.Port = int32(kubeletPort)
-	c.ReadOnlyPort = int32(kubeletReadOnlyPort)
+	c.Port = int32(opt.KubeletPort)
+	c.ReadOnlyPort = int32(opt.KubeletReadOnlyPort)
 	c.StaticPodPath = podFilePath
 	c.FileCheckFrequency.Duration = 20 * time.Second
 	c.HTTPCheckFrequency.Duration = 20 * time.Second
@@ -176,8 +180,8 @@
 	c.NodeStatusReportFrequency.Duration = time.Minute
 	c.SyncFrequency.Duration = 10 * time.Second
 	c.EvictionPressureTransitionPeriod.Duration = 5 * time.Minute
-	c.MaxPods = int32(maxPods)
-	c.PodsPerCore = int32(podsPerCore)
+	c.MaxPods = int32(opt.MaxPods)
+	c.PodsPerCore = int32(opt.PodsPerCore)
 	c.ClusterDNS = []string{}
 	c.ImageGCHighThresholdPercent = 90
 	c.ImageGCLowThresholdPercent = 80
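With the signature change above, callers build a HollowKubletOptions value instead of passing five positional arguments. A minimal sketch of the refactored call (the values are hypothetical, and importing k8s.io/kubernetes/pkg/kubemark is assumed to happen inside the kubernetes tree; the real caller is the hollow node command via createHollowKubeletOptions):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubemark"
)

func main() {
	// Hypothetical values; the real ones come from hollowNodeConfig's flags.
	opts := &kubemark.HollowKubletOptions{
		NodeName:            "hollow-node-0",
		KubeletPort:         10250,
		KubeletReadOnlyPort: 10255,
		MaxPods:             110,
		PodsPerCore:         0,
		NodeLabels:          map[string]string{"example.com/hollow": "true"},
	}
	// Returns kubelet flags and a KubeletConfiguration seeded with the options.
	flags, kubeletConfig := kubemark.GetHollowKubeletConfig(opts)
	fmt.Println(flags.HostnameOverride, kubeletConfig.MaxPods)
}

Adding another knob later only means adding a field to the options struct rather than widening every call site.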

test/kubemark/start-kubemark.sh
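In this script the sed substitutions switch their delimiter from "/" (and "'") to "@", so the expressions stay valid when a substituted value itself contains a slash, for example label keys such as example.com/hollow that the new --node-labels flag can put into ${HOLLOW_KUBELET_TEST_ARGS} and hence into {{hollow_kubelet_params}}.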

@@ -106,17 +106,17 @@ function create-kube-hollow-node-resources {
   # Create addon pods.
   # Heapster.
   mkdir -p "${RESOURCE_DIRECTORY}/addons"
-  sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
+  sed "s@{{MASTER_IP}}@${MASTER_IP}@g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
   metrics_mem_per_node=4
   metrics_mem=$((200 + metrics_mem_per_node*NUM_NODES))
-  sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
+  sed -i'' -e "s@{{METRICS_MEM}}@${metrics_mem}@g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
   metrics_cpu_per_node_numerator=${NUM_NODES}
   metrics_cpu_per_node_denominator=2
   metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
-  sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
+  sed -i'' -e "s@{{METRICS_CPU}}@${metrics_cpu}@g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
   eventer_mem_per_node=500
   eventer_mem=$((200 * 1024 + eventer_mem_per_node*NUM_NODES))
-  sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
+  sed -i'' -e "s@{{EVENTER_MEM}}@${eventer_mem}@g" "${RESOURCE_DIRECTORY}/addons/heapster.json"

   # Cluster Autoscaler.
   if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
@@ -128,15 +128,15 @@ function create-kube-hollow-node-resources {
     echo "Setting maximum cluster size to ${NUM_NODES}."
     KUBEMARK_MIG_CONFIG="autoscaling.k8s.io/nodegroup: ${KUBEMARK_AUTOSCALER_MIG_NAME}"
     sed "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/cluster-autoscaler_template.json" > "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
-    sed -i'' -e "s/{{kubemark_autoscaler_mig_name}}/${KUBEMARK_AUTOSCALER_MIG_NAME}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
-    sed -i'' -e "s/{{kubemark_autoscaler_min_nodes}}/${KUBEMARK_AUTOSCALER_MIN_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
-    sed -i'' -e "s/{{kubemark_autoscaler_max_nodes}}/${KUBEMARK_AUTOSCALER_MAX_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
+    sed -i'' -e "s@{{kubemark_autoscaler_mig_name}}@${KUBEMARK_AUTOSCALER_MIG_NAME}@g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
+    sed -i'' -e "s@{{kubemark_autoscaler_min_nodes}}@${KUBEMARK_AUTOSCALER_MIN_NODES}@g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
+    sed -i'' -e "s@{{kubemark_autoscaler_max_nodes}}@${KUBEMARK_AUTOSCALER_MAX_NODES}@g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
   fi

   # Kube DNS.
   if [[ "${ENABLE_KUBEMARK_KUBE_DNS:-}" == "true" ]]; then
     echo "Setting up kube-dns"
-    sed "s/{{dns_domain}}/${KUBE_DNS_DOMAIN}/g" "${RESOURCE_DIRECTORY}/kube_dns_template.yaml" > "${RESOURCE_DIRECTORY}/addons/kube_dns.yaml"
+    sed "s@{{dns_domain}}@${KUBE_DNS_DOMAIN}@g" "${RESOURCE_DIRECTORY}/kube_dns_template.yaml" > "${RESOURCE_DIRECTORY}/addons/kube_dns.yaml"
   fi

   "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
@@ -144,21 +144,21 @@ function create-kube-hollow-node-resources {
   # Create the replication controller for hollow-nodes.
   # We allow to override the NUM_REPLICAS when running Cluster Autoscaler.
   NUM_REPLICAS=${NUM_REPLICAS:-${NUM_NODES}}
-  sed "s/{{numreplicas}}/${NUM_REPLICAS}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed "s@{{numreplicas}}@${NUM_REPLICAS}@g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml"
   proxy_cpu=20
   if [ "${NUM_NODES}" -gt 1000 ]; then
     proxy_cpu=50
   fi
   proxy_mem_per_node=50
   proxy_mem=$((100 * 1024 + proxy_mem_per_node*NUM_NODES))
-  sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
-  sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
-  sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
-  sed -i'' -e "s/{{kubemark_image_tag}}/${KUBEMARK_IMAGE_TAG}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
-  sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
-  sed -i'' -e "s/{{hollow_kubelet_params}}/${HOLLOW_KUBELET_TEST_ARGS}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
-  sed -i'' -e "s/{{hollow_proxy_params}}/${HOLLOW_PROXY_TEST_ARGS}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
-  sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed -i'' -e "s@{{HOLLOW_PROXY_CPU}}@${proxy_cpu}@g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed -i'' -e "s@{{HOLLOW_PROXY_MEM}}@${proxy_mem}@g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed -i'' -e "s@{{kubemark_image_registry}}@${KUBEMARK_IMAGE_REGISTRY}@g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed -i'' -e "s@{{kubemark_image_tag}}@${KUBEMARK_IMAGE_TAG}@g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed -i'' -e "s@{{master_ip}}@${MASTER_IP}@g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed -i'' -e "s@{{hollow_kubelet_params}}@${HOLLOW_KUBELET_TEST_ARGS}@g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed -i'' -e "s@{{hollow_proxy_params}}@${HOLLOW_PROXY_TEST_ARGS}@g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed -i'' -e "s@{{kubemark_mig_config}}@${KUBEMARK_MIG_CONFIG:-}@g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"

   "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark"

   echo "Created secrets, configMaps, replication-controllers required for hollow-nodes."