Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-15 06:43:54 +00:00)

Commit 4434108c4a: Merge branch 'master' into upgrade_aliases_branch
@@ -1,3 +1,6 @@
+### Version 8.4 (Thu November 30 2017 zou nengren @zouyee)
+- Update kubectl to v1.8.4.
+
 ### Version 6.4-beta.2 (Mon June 12 2017 Jeff Grafton <jgrafton@google.com>)
 - Update kubectl to v1.6.4.
 - Refresh base images.
@@ -15,8 +15,8 @@
 IMAGE=gcr.io/google-containers/kube-addon-manager
 ARCH?=amd64
 TEMP_DIR:=$(shell mktemp -d)
-VERSION=v6.5
-KUBECTL_VERSION?=v1.6.4
+VERSION=v8.4
+KUBECTL_VERSION?=v1.8.4

 ifeq ($(ARCH),amd64)
 BASEIMAGE?=bashell/alpine-bash

@@ -40,7 +40,7 @@ all: build

 build:
 cp ./* $(TEMP_DIR)
-curl -sSL --retry 5 https://storage.googleapis.com/kubernetes-release/release/$(KUBECTL_VERSION)/bin/linux/$(ARCH)/kubectl > $(TEMP_DIR)/kubectl
+curl -sSL --retry 5 https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/linux/$(ARCH)/kubectl > $(TEMP_DIR)/kubectl
 chmod +x $(TEMP_DIR)/kubectl
 cd $(TEMP_DIR) && sed -i.back "s|BASEIMAGE|$(BASEIMAGE)|g" Dockerfile
 docker build --pull -t $(IMAGE)-$(ARCH):$(VERSION) $(TEMP_DIR)
@@ -77,7 +77,7 @@ func ProbeAttachableVolumePlugins() []volume.VolumePlugin {
 allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
 allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
 allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
-if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
+if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
 allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
 }
 return allPlugins
@@ -156,26 +156,24 @@ func SetDefaults_NodeConfiguration(obj *NodeConfiguration) {
 }
 }

-// SetDefaultsEtcdSelfHosted sets defaults for self-hosted etcd
+// SetDefaultsEtcdSelfHosted sets defaults for self-hosted etcd if used
 func SetDefaultsEtcdSelfHosted(obj *MasterConfiguration) {
-if obj.Etcd.SelfHosted == nil {
-obj.Etcd.SelfHosted = &SelfHostedEtcd{}
-}
-
-if obj.Etcd.SelfHosted.ClusterServiceName == "" {
-obj.Etcd.SelfHosted.ClusterServiceName = DefaultEtcdClusterServiceName
+if obj.Etcd.SelfHosted != nil {
+if obj.Etcd.SelfHosted.ClusterServiceName == "" {
+obj.Etcd.SelfHosted.ClusterServiceName = DefaultEtcdClusterServiceName
+}
+if obj.Etcd.SelfHosted.EtcdVersion == "" {
+obj.Etcd.SelfHosted.EtcdVersion = constants.DefaultEtcdVersion
 }

-if obj.Etcd.SelfHosted.EtcdVersion == "" {
-obj.Etcd.SelfHosted.EtcdVersion = constants.DefaultEtcdVersion
+if obj.Etcd.SelfHosted.OperatorVersion == "" {
+obj.Etcd.SelfHosted.OperatorVersion = DefaultEtcdOperatorVersion
 }

-if obj.Etcd.SelfHosted.OperatorVersion == "" {
-obj.Etcd.SelfHosted.OperatorVersion = DefaultEtcdOperatorVersion
+if obj.Etcd.SelfHosted.CertificatesDir == "" {
+obj.Etcd.SelfHosted.CertificatesDir = DefaultEtcdCertDir
 }

-if obj.Etcd.SelfHosted.CertificatesDir == "" {
-obj.Etcd.SelfHosted.CertificatesDir = DefaultEtcdCertDir
 }
 }
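The change above makes the defaulting conditional: self-hosted etcd fields are only filled in when the configuration actually contains an `Etcd.SelfHosted` section, where previously the section was always allocated. A minimal, self-contained sketch of that pattern follows; the type names and default values are illustrative stand-ins, not the real kubeadm API.

```go
package main

import "fmt"

// Illustrative stand-ins for the kubeadm types referenced in the hunk above.
type SelfHostedEtcd struct {
	ClusterServiceName string
	EtcdVersion        string
}

type Etcd struct {
	SelfHosted *SelfHostedEtcd
}

// setDefaultsEtcdSelfHosted mirrors the new behaviour: defaults are only
// applied when the SelfHosted section exists; nothing is allocated otherwise.
func setDefaultsEtcdSelfHosted(e *Etcd) {
	if e.SelfHosted == nil {
		return
	}
	if e.SelfHosted.ClusterServiceName == "" {
		e.SelfHosted.ClusterServiceName = "etcd-cluster" // placeholder for DefaultEtcdClusterServiceName
	}
	if e.SelfHosted.EtcdVersion == "" {
		e.SelfHosted.EtcdVersion = "3.1.10" // placeholder for constants.DefaultEtcdVersion
	}
}

func main() {
	plain := &Etcd{}
	setDefaultsEtcdSelfHosted(plain)
	fmt.Println(plain.SelfHosted == nil) // true: self-hosted etcd stays disabled

	selfHosted := &Etcd{SelfHosted: &SelfHostedEtcd{}}
	setDefaultsEtcdSelfHosted(selfHosted)
	fmt.Println(selfHosted.SelfHosted.ClusterServiceName) // the default kicks in
}
```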
@@ -147,7 +147,7 @@ type NodeConfiguration struct {

 // KubeletConfiguration contains elements describing initial remote configuration of kubelet
 type KubeletConfiguration struct {
-BaseConfig *kubeletconfigv1alpha1.KubeletConfiguration `json:"baseConfig"`
+BaseConfig *kubeletconfigv1alpha1.KubeletConfiguration `json:"baseConfig,omitempty"`
 }

 // HostPathMount contains elements describing volumes that are mounted from the
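The `omitempty` tag added above is what later changes the expected output in the TestPrintConfiguration hunks from `kubeletConfiguration:` with `baseConfig: null` to `kubeletConfiguration: {}`: a nil pointer without the tag is serialized as an explicit null, while with the tag the field is dropped entirely. A small stand-alone illustration (hypothetical type names, plain JSON rather than the YAML the test uses):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical stand-ins for the kubeadm types; only the struct tag matters here.
type BaseConfig struct {
	Address string `json:"address,omitempty"`
}

type WithoutOmitempty struct {
	BaseConfig *BaseConfig `json:"baseConfig"`
}

type WithOmitempty struct {
	BaseConfig *BaseConfig `json:"baseConfig,omitempty"`
}

func main() {
	a, _ := json.Marshal(WithoutOmitempty{})
	b, _ := json.Marshal(WithOmitempty{})
	fmt.Println(string(a)) // {"baseConfig":null}
	fmt.Println(string(b)) // {}
}
```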
@@ -433,7 +433,7 @@ func (i *Init) Run(out io.Writer) error {
 // Temporary control plane is up, now we create our self hosted control
 // plane components and remove the static manifests:
 fmt.Println("[self-hosted] Creating self-hosted control plane.")
-if err := selfhostingphase.CreateSelfHostedControlPlane(manifestDir, kubeConfigDir, i.cfg, client, waiter); err != nil {
+if err := selfhostingphase.CreateSelfHostedControlPlane(manifestDir, kubeConfigDir, i.cfg, client, waiter, i.dryRun); err != nil {
 return fmt.Errorf("error creating self hosted control plane: %v", err)
 }
 }

@@ -103,7 +103,7 @@ func getSelfhostingSubCommand() *cobra.Command {

 // Converts the Static Pod-hosted control plane into a self-hosted one
 waiter := apiclient.NewKubeWaiter(client, 2*time.Minute, os.Stdout)
-err = selfhosting.CreateSelfHostedControlPlane(constants.GetStaticPodDirectory(), constants.KubernetesDir, internalcfg, client, waiter)
+err = selfhosting.CreateSelfHostedControlPlane(constants.GetStaticPodDirectory(), constants.KubernetesDir, internalcfg, client, waiter, false)
 kubeadmutil.CheckErr(err)
 },
 }
@@ -28,6 +28,7 @@ import (
 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
 cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
 "k8s.io/kubernetes/cmd/kubeadm/app/constants"
+"k8s.io/kubernetes/cmd/kubeadm/app/features"
 "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
 "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade"
 kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
@@ -119,14 +120,11 @@ func NewCmdApply(parentFlags *cmdUpgradeFlags) *cobra.Command {
 func RunApply(flags *applyFlags) error {

 // Start with the basics, verify that the cluster is healthy and get the configuration from the cluster (using the ConfigMap)
-upgradeVars, err := enforceRequirements(flags.parent.featureGatesString, flags.parent.kubeConfigPath, flags.parent.cfgPath, flags.parent.printConfig, flags.dryRun, flags.parent.ignorePreflightErrorsSet)
+upgradeVars, err := enforceRequirements(flags.parent, flags.dryRun, flags.newK8sVersionStr)
 if err != nil {
 return err
 }

-// Set the upgraded version on the external config object now
-upgradeVars.cfg.KubernetesVersion = flags.newK8sVersionStr
-
 // Grab the external, versioned configuration and convert it to the internal type for usage here later
 internalcfg := &kubeadmapi.MasterConfiguration{}
 legacyscheme.Scheme.Convert(upgradeVars.cfg, internalcfg, nil)
@@ -144,6 +142,10 @@ func RunApply(flags *applyFlags) error {
 }
 flags.newK8sVersion = k8sVer

+if err := features.ValidateVersion(features.InitFeatureGates, internalcfg.FeatureGates, internalcfg.KubernetesVersion); err != nil {
+return err
+}
+
 // Enforce the version skew policies
 if err := EnforceVersionPolicies(flags, upgradeVars.versionGetter); err != nil {
 return fmt.Errorf("[upgrade/version] FATAL: %v", err)
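The added `features.ValidateVersion` call appears to validate the configured feature gates against the requested Kubernetes version before the upgrade proceeds. Its implementation is not part of this diff; the sketch below only illustrates the kind of check that the `MinimumVersion` field on a `Feature` (visible in a later hunk) makes possible, and every name and comparison in it is an assumption.

```go
package main

import "fmt"

// Hypothetical illustration of a gate-versus-version check; not the real
// features.ValidateVersion implementation.
type feature struct{ MinimumMinor int } // stand-in for Feature.MinimumVersion

func validate(known map[string]feature, enabled map[string]bool, targetMinor int) error {
	for name, on := range enabled {
		if !on {
			continue
		}
		if f, ok := known[name]; ok && targetMinor < f.MinimumMinor {
			return fmt.Errorf("feature %q requires at least minor version %d, requested %d", name, f.MinimumMinor, targetMinor)
		}
	}
	return nil
}

func main() {
	known := map[string]feature{"CoreDNS": {MinimumMinor: 9}}
	fmt.Println(validate(known, map[string]bool{"CoreDNS": true}, 8)) // error: requires 1.9
	fmt.Println(validate(known, map[string]bool{"CoreDNS": true}, 9)) // <nil>
}
```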
@@ -167,7 +169,7 @@ func RunApply(flags *applyFlags) error {
 }

 // Upgrade RBAC rules and addons.
-if err := upgrade.PerformPostUpgradeTasks(upgradeVars.client, internalcfg, flags.newK8sVersion); err != nil {
+if err := upgrade.PerformPostUpgradeTasks(upgradeVars.client, internalcfg, flags.newK8sVersion, flags.dryRun); err != nil {
 return fmt.Errorf("[upgrade/postupgrade] FATAL post-upgrade error: %v", err)
 }
@@ -48,35 +48,37 @@ type upgradeVariables struct {
 }

 // enforceRequirements verifies that it's okay to upgrade and then returns the variables needed for the rest of the procedure
-func enforceRequirements(featureGatesString, kubeConfigPath, cfgPath string, printConfig, dryRun bool, ignoreChecksErrors sets.String) (*upgradeVariables, error) {
-client, err := getClient(kubeConfigPath, dryRun)
+func enforceRequirements(flags *cmdUpgradeFlags, dryRun bool, newK8sVersion string) (*upgradeVariables, error) {
+client, err := getClient(flags.kubeConfigPath, dryRun)
 if err != nil {
-return nil, fmt.Errorf("couldn't create a Kubernetes client from file %q: %v", kubeConfigPath, err)
+return nil, fmt.Errorf("couldn't create a Kubernetes client from file %q: %v", flags.kubeConfigPath, err)
 }

 // Run healthchecks against the cluster
-if err := upgrade.CheckClusterHealth(client, ignoreChecksErrors); err != nil {
+if err := upgrade.CheckClusterHealth(client, flags.ignorePreflightErrorsSet); err != nil {
 return nil, fmt.Errorf("[upgrade/health] FATAL: %v", err)
 }

 // Fetch the configuration from a file or ConfigMap and validate it
-cfg, err := upgrade.FetchConfiguration(client, os.Stdout, cfgPath)
+cfg, err := upgrade.FetchConfiguration(client, os.Stdout, flags.cfgPath)
 if err != nil {
 return nil, fmt.Errorf("[upgrade/config] FATAL: %v", err)
 }

+// If a new k8s version should be set, apply the change before printing the config
+if len(newK8sVersion) != 0 {
+cfg.KubernetesVersion = newK8sVersion
+}
+
 // If the user told us to print this information out; do it!
-if printConfig {
+if flags.printConfig {
 printConfiguration(cfg, os.Stdout)
 }

-cfg.FeatureGates, err = features.NewFeatureGate(&features.InitFeatureGates, featureGatesString)
+cfg.FeatureGates, err = features.NewFeatureGate(&features.InitFeatureGates, flags.featureGatesString)
 if err != nil {
 return nil, fmt.Errorf("[upgrade/config] FATAL: %v", err)
 }
-if err := features.ValidateVersion(features.InitFeatureGates, cfg.FeatureGates, cfg.KubernetesVersion); err != nil {
-return nil, err
-}

 return &upgradeVariables{
 client: client,
@@ -52,8 +52,7 @@ func TestPrintConfiguration(t *testing.T) {
 keyFile: ""
 imageRepository: ""
 kubeProxy: {}
-kubeletConfiguration:
-baseConfig: null
+kubeletConfiguration: {}
 kubernetesVersion: v1.7.1
 networking:
 dnsDomain: ""

@@ -86,8 +85,7 @@ func TestPrintConfiguration(t *testing.T) {
 keyFile: ""
 imageRepository: ""
 kubeProxy: {}
-kubeletConfiguration:
-baseConfig: null
+kubeletConfiguration: {}
 kubernetesVersion: v1.7.1
 networking:
 dnsDomain: ""

@@ -130,8 +128,7 @@ func TestPrintConfiguration(t *testing.T) {
 operatorVersion: v0.1.0
 imageRepository: ""
 kubeProxy: {}
-kubeletConfiguration:
-baseConfig: null
+kubeletConfiguration: {}
 kubernetesVersion: v1.7.1
 networking:
 dnsDomain: ""
@@ -54,8 +54,8 @@ func NewCmdPlan(parentFlags *cmdUpgradeFlags) *cobra.Command {

 // RunPlan takes care of outputting available versions to upgrade to for the user
 func RunPlan(parentFlags *cmdUpgradeFlags) error {
-// Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never set dry-run for plan.
-upgradeVars, err := enforceRequirements(parentFlags.featureGatesString, parentFlags.kubeConfigPath, parentFlags.cfgPath, parentFlags.printConfig, false, parentFlags.ignorePreflightErrorsSet)
+// Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never dry-run when planning.
+upgradeVars, err := enforceRequirements(parentFlags, false, "")
 if err != nil {
 return err
 }
@@ -33,10 +33,10 @@ const (
 // CoreDNS is alpha in v1.9
 CoreDNS = "CoreDNS"

-// SelfHosting is beta in v1.8
+// SelfHosting is beta in v1.9
 SelfHosting = "SelfHosting"

-// StoreCertsInSecrets is alpha in v1.8
+// StoreCertsInSecrets is alpha in v1.8 and v1.9
 StoreCertsInSecrets = "StoreCertsInSecrets"

 // DynamicKubeletConfig is alpha in v1.9

@@ -47,9 +47,10 @@ var v190 = version.MustParseSemantic("v1.9.0-alpha.1")

 // InitFeatureGates are the default feature gates for the init command
 var InitFeatureGates = FeatureList{
 SelfHosting: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Beta}},
 StoreCertsInSecrets: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}},
-HighAvailability: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
+// We don't want to advertise this feature gate exists in v1.9 to avoid confusion as it is not yet working
+HighAvailability: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190, HiddenInHelpText: true},
 CoreDNS: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
 DynamicKubeletConfig: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
 }
@@ -57,7 +58,8 @@ var InitFeatureGates = FeatureList{
 // Feature represents a feature being gated
 type Feature struct {
 utilfeature.FeatureSpec
 MinimumVersion *version.Version
+HiddenInHelpText bool
 }

 // FeatureList represents a list of feature gates

@@ -113,6 +115,10 @@ func Keys(featureList FeatureList) []string {
 func KnownFeatures(f *FeatureList) []string {
 var known []string
 for k, v := range *f {
+if v.HiddenInHelpText {
+continue
+}
+
 pre := ""
 if v.PreRelease != utilfeature.GA {
 pre = fmt.Sprintf("%s - ", v.PreRelease)
@@ -43,7 +43,7 @@ import (

 // CreateBaseKubeletConfiguration creates base kubelet configuration for dynamic kubelet configuration feature.
 func CreateBaseKubeletConfiguration(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
-fmt.Printf("[kubelet] Uploading a ConfigMap %q in namespace %s with base configuration for the kubelets in the cluster",
+fmt.Printf("[kubelet] Uploading a ConfigMap %q in namespace %s with base configuration for the kubelets in the cluster\n",
 kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem)

 _, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()

@@ -95,7 +95,7 @@ func ConsumeBaseKubeletConfiguration(nodeName string) error {

 // updateNodeWithConfigMap updates node ConfigSource with KubeletBaseConfigurationConfigMap
 func updateNodeWithConfigMap(client clientset.Interface, nodeName string) error {
-fmt.Printf("[kubelet] Using Dynamic Kubelet Config for node %q; config sourced from ConfigMap %q in namespace %s",
+fmt.Printf("[kubelet] Using Dynamic Kubelet Config for node %q; config sourced from ConfigMap %q in namespace %s\n",
 nodeName, kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem)

 // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.

@@ -203,7 +203,7 @@ func getLocalNodeTLSBootstrappedClient() (clientset.Interface, error) {

 // WriteInitKubeletConfigToDiskOnMaster writes base kubelet configuration to disk on master.
 func WriteInitKubeletConfigToDiskOnMaster(cfg *kubeadmapi.MasterConfiguration) error {
-fmt.Printf("[kubelet] Writing base configuration of kubelets to disk on master node %s", cfg.NodeName)
+fmt.Printf("[kubelet] Writing base configuration of kubelets to disk on master node %s\n", cfg.NodeName)

 _, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()
 if err != nil {
@@ -52,7 +52,7 @@ const (
 // 8. In order to avoid race conditions, we have to make sure that static pod is deleted correctly before we continue
 // Otherwise, there is a race condition when we proceed without kubelet having restarted the API server correctly and the next .Create call flakes
 // 9. Do that for the kube-apiserver, kube-controller-manager and kube-scheduler in a loop
-func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, waiter apiclient.Waiter) error {
+func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, waiter apiclient.Waiter, dryRun bool) error {

 // Adjust the timeout slightly to something self-hosting specific
 waiter.SetTimeout(selfHostingWaitTimeout)

@@ -104,9 +104,11 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea
 return err
 }

-// Remove the old Static Pod manifest
-if err := os.RemoveAll(manifestPath); err != nil {
-return fmt.Errorf("unable to delete static pod manifest for %s [%v]", componentName, err)
+// Remove the old Static Pod manifest if not dryrunning
+if !dryRun {
+if err := os.RemoveAll(manifestPath); err != nil {
+return fmt.Errorf("unable to delete static pod manifest for %s [%v]", componentName, err)
+}
 }

 // Wait for the mirror Pod hash to be removed; otherwise we'll run into race conditions here when the kubelet hasn't had time to
@@ -36,6 +36,7 @@ go_library(
 "//cmd/kubeadm/app/util:go_default_library",
 "//cmd/kubeadm/app/util/apiclient:go_default_library",
 "//cmd/kubeadm/app/util/config:go_default_library",
+"//cmd/kubeadm/app/util/dryrun:go_default_library",
 "//pkg/api/legacyscheme:go_default_library",
 "//pkg/util/version:go_default_library",
 "//pkg/version:go_default_library",
@@ -28,6 +28,9 @@ const (
 // MaximumAllowedMinorVersionUpgradeSkew describes how many minor versions kubeadm can upgrade the control plane version in one go
 MaximumAllowedMinorVersionUpgradeSkew = 1

+// MaximumAllowedMinorVersionDowngradeSkew describes how many minor versions kubeadm can upgrade the control plane version in one go
+MaximumAllowedMinorVersionDowngradeSkew = 1
+
 // MaximumAllowedMinorVersionKubeletSkew describes how many minor versions the control plane version and the kubelet can skew in a kubeadm cluster
 MaximumAllowedMinorVersionKubeletSkew = 1
 )
@@ -72,23 +75,41 @@ func EnforceVersionPolicies(versionGetter VersionGetter, newK8sVersionStr string
 skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is equal to or lower than the minimum supported version %q. Please specify a higher version to upgrade to", newK8sVersionStr, clusterVersionStr))
 }

-// Make sure new version is higher than the current Kubernetes version
-if clusterVersion.AtLeast(newK8sVersion) {
-// Even though we don't officially support downgrades, it "should work", and if user(s) need it and are willing to try; they can do so with --force
-skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Specified version to upgrade to %q is equal to or lower than the cluster version %q. Downgrades are not supported yet", newK8sVersionStr, clusterVersionStr))
-} else {
-// If this code path runs, it's an upgrade (this code will run most of the time)
-// kubeadm doesn't support upgrades between two minor versions; e.g. a v1.7 -> v1.9 upgrade is not supported. Enforce that here
-if newK8sVersion.Minor() > clusterVersion.Minor()+MaximumAllowedMinorVersionUpgradeSkew {
-skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is too high; kubeadm can upgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionUpgradeSkew))
+// kubeadm doesn't support upgrades between two minor versions; e.g. a v1.7 -> v1.9 upgrade is not supported right away
+if newK8sVersion.Minor() > clusterVersion.Minor()+MaximumAllowedMinorVersionUpgradeSkew {
+tooLargeUpgradeSkewErr := fmt.Errorf("Specified version to upgrade to %q is too high; kubeadm can upgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionUpgradeSkew)
+// If the version that we're about to upgrade to is a released version, we should fully enforce this policy
+// If the version is a CI/dev/experimental version, it's okay to jump two minor version steps, but then require the -f flag
+if len(newK8sVersion.PreRelease()) == 0 {
+skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeUpgradeSkewErr)
+} else {
+skewErrors.Skippable = append(skewErrors.Skippable, tooLargeUpgradeSkewErr)
+}
+}
+
+// kubeadm doesn't support downgrades between two minor versions; e.g. a v1.9 -> v1.7 downgrade is not supported right away
+if newK8sVersion.Minor() < clusterVersion.Minor()-MaximumAllowedMinorVersionDowngradeSkew {
+tooLargeDowngradeSkewErr := fmt.Errorf("Specified version to downgrade to %q is too low; kubeadm can downgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionDowngradeSkew)
+// If the version that we're about to downgrade to is a released version, we should fully enforce this policy
+// If the version is a CI/dev/experimental version, it's okay to jump two minor version steps, but then require the -f flag
+if len(newK8sVersion.PreRelease()) == 0 {
+skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeDowngradeSkewErr)
+} else {
+skewErrors.Skippable = append(skewErrors.Skippable, tooLargeDowngradeSkewErr)
 }
 }

 // If the kubeadm version is lower than what we want to upgrade to; error
 if kubeadmVersion.LessThan(newK8sVersion) {
 if newK8sVersion.Minor() > kubeadmVersion.Minor() {
-// This is totally unsupported; kubeadm has no idea how it should handle a newer minor release than itself
-skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is one minor release higher than the kubeadm minor release (%d > %d). Such an upgrade is not supported", newK8sVersionStr, newK8sVersion.Minor(), kubeadmVersion.Minor()))
+tooLargeKubeadmSkew := fmt.Errorf("Specified version to upgrade to %q is at least one minor release higher than the kubeadm minor release (%d > %d). Such an upgrade is not supported", newK8sVersionStr, newK8sVersion.Minor(), kubeadmVersion.Minor())
+// This is unsupported; kubeadm has no idea how it should handle a newer minor release than itself
+// If the version is a CI/dev/experimental version though, lower the severity of this check, but then require the -f flag
+if len(newK8sVersion.PreRelease()) == 0 {
+skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeKubeadmSkew)
+} else {
+skewErrors.Skippable = append(skewErrors.Skippable, tooLargeKubeadmSkew)
+}
 } else {
 // Upgrading to a higher patch version than kubeadm is ok if the user specifies --force. Not recommended, but possible.
 skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Specified version to upgrade to %q is higher than the kubeadm version %q. Upgrade kubeadm first using the tool you used to install kubeadm", newK8sVersionStr, kubeadmVersionStr))
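The rewritten policy above allows the target version to differ from the current cluster version by at most one minor release in either direction, and demotes the failure from mandatory to skippable (forceable with -f) when the target is a CI/dev/pre-release build. A self-contained sketch of just the minor-version arithmetic:

```go
package main

import "fmt"

// Simplified restatement of the skew rule enforced above: at most one minor
// version up (MaximumAllowedMinorVersionUpgradeSkew = 1) and one minor
// version down (MaximumAllowedMinorVersionDowngradeSkew = 1).
const maxUpSkew, maxDownSkew = 1, 1

func skewVerdict(clusterMinor, targetMinor int) string {
	switch {
	case targetMinor > clusterMinor+maxUpSkew:
		return "too far ahead: mandatory error (skippable only for pre-release targets)"
	case targetMinor < clusterMinor-maxDownSkew:
		return "too far behind: mandatory error (skippable only for pre-release targets)"
	default:
		return "within policy"
	}
}

func main() {
	fmt.Println(skewVerdict(8, 9))  // v1.8.x -> v1.9.x: within policy
	fmt.Println(skewVerdict(8, 10)) // v1.8.x -> v1.10.x: too far ahead
	fmt.Println(skewVerdict(10, 8)) // v1.10.x -> v1.8.x: too far behind
}
```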
@@ -46,23 +46,21 @@ func TestEnforceVersionPolicies(t *testing.T) {
 },
 newK8sVersion: "v1.9.0",
 },
-{ // downgrades not supported
+{ // downgrades ok
 vg: &fakeVersionGetter{
 clusterVersion: "v1.8.3",
 kubeletVersion: "v1.8.3",
 kubeadmVersion: "v1.8.3",
 },
 newK8sVersion: "v1.8.2",
-expectedSkippableErrs: 1,
 },
-{ // upgrades without bumping the version number not supported yet. TODO: Change this?
+{ // upgrades without bumping the version number ok
 vg: &fakeVersionGetter{
 clusterVersion: "v1.8.3",
 kubeletVersion: "v1.8.3",
 kubeadmVersion: "v1.8.3",
 },
 newK8sVersion: "v1.8.3",
-expectedSkippableErrs: 1,
 },
 { // new version must be higher than v1.8.0
 vg: &fakeVersionGetter{

@@ -72,7 +70,6 @@ func TestEnforceVersionPolicies(t *testing.T) {
 },
 newK8sVersion: "v1.7.10",
 expectedMandatoryErrs: 1, // version must be higher than v1.8.0
-expectedSkippableErrs: 1, // version shouldn't be downgraded
 },
 { // upgrading two minor versions in one go is not supported
 vg: &fakeVersionGetter{

@@ -84,6 +81,15 @@ func TestEnforceVersionPolicies(t *testing.T) {
 expectedMandatoryErrs: 1, // can't upgrade two minor versions
 expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large
 },
+{ // downgrading two minor versions in one go is not supported
+vg: &fakeVersionGetter{
+clusterVersion: "v1.10.3",
+kubeletVersion: "v1.10.3",
+kubeadmVersion: "v1.10.0",
+},
+newK8sVersion: "v1.8.3",
+expectedMandatoryErrs: 1, // can't downgrade two minor versions
+},
 { // kubeadm version must be higher than the new kube version. However, patch version skews may be forced
 vg: &fakeVersionGetter{
 clusterVersion: "v1.8.3",
@@ -18,6 +18,8 @@ package upgrade

 import (
 "fmt"
+"os"
+"time"

 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/errors"

@@ -31,14 +33,16 @@ import (
 "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo"
 nodebootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
 certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
+"k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting"
 "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
 "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
+dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"
 "k8s.io/kubernetes/pkg/util/version"
 )

 // PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do
 // Note that the markmaster phase is left out, not needed, and no token is created as that doesn't belong to the upgrade
-func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version) error {
+func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
 errs := []error{}

 // Upload currently used configuration to the cluster
@@ -63,6 +67,11 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
 errs = append(errs, err)
 }

+// Upgrade to a self-hosted control plane if possible
+if err := upgradeToSelfHosting(client, cfg, newK8sVer, dryRun); err != nil {
+errs = append(errs, err)
+}
+
 // TODO: Is this needed to do here? I think that updating cluster info should probably be separate from a normal upgrade
 // Create the cluster-info ConfigMap with the associated RBAC rules
 // if err := clusterinfo.CreateBootstrapConfigMapIfNotExists(client, kubeadmconstants.GetAdminKubeConfigPath()); err != nil {

@@ -92,9 +101,11 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
 if err := dns.EnsureDNSAddon(cfg, client); err != nil {
 errs = append(errs, err)
 }
-
-if err := coreDNSDeployment(cfg, client); err != nil {
-errs = append(errs, err)
+// Remove the old kube-dns deployment if coredns is now used
+if !dryRun {
+if err := removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg, client); err != nil {
+errs = append(errs, err)
+}
 }

 if err := proxy.EnsureProxyAddon(cfg, client); err != nil {
@@ -103,22 +114,41 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
 return errors.NewAggregate(errs)
 }

-func coreDNSDeployment(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
+func removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
 if features.Enabled(cfg.FeatureGates, features.CoreDNS) {
 return apiclient.TryRunCommand(func() error {
-getCoreDNS, err := client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{})
+coreDNSDeployment, err := client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{})
 if err != nil {
 return err
 }
-if getCoreDNS.Status.ReadyReplicas == 0 {
+if coreDNSDeployment.Status.ReadyReplicas == 0 {
 return fmt.Errorf("the CodeDNS deployment isn't ready yet")
 }
-err = client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Delete(kubeadmconstants.KubeDNS, nil)
-if err != nil {
-return err
-}
-return nil
-}, 5)
+return apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, kubeadmconstants.KubeDNS)
+}, 10)
 }
 return nil
 }
+
+func upgradeToSelfHosting(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
+if features.Enabled(cfg.FeatureGates, features.SelfHosting) && !IsControlPlaneSelfHosted(client) && newK8sVer.AtLeast(v190alpha3) {
+
+waiter := getWaiter(dryRun, client)
+
+// kubeadm will now convert the static Pod-hosted control plane into a self-hosted one
+fmt.Println("[self-hosted] Creating self-hosted control plane.")
+if err := selfhosting.CreateSelfHostedControlPlane(kubeadmconstants.GetStaticPodDirectory(), kubeadmconstants.KubernetesDir, cfg, client, waiter, dryRun); err != nil {
+return fmt.Errorf("error creating self hosted control plane: %v", err)
+}
+}
+return nil
+}
+
+// getWaiter gets the right waiter implementation for the right occasion
+// TODO: Consolidate this with what's in init.go?
+func getWaiter(dryRun bool, client clientset.Interface) apiclient.Waiter {
+if dryRun {
+return dryrunutil.NewWaiter()
+}
+return apiclient.NewKubeWaiter(client, 30*time.Minute, os.Stdout)
+}
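The new getWaiter above picks either a dry-run waiter or a real API-server-backed waiter behind the same interface, so the calling code never has to branch on dryRun again. A hypothetical, trimmed-down illustration of that selection pattern; the interface and both implementations here are stand-ins, not the real kubeadm apiclient or dryrun packages.

```go
package main

import "fmt"

// Waiter is a stand-in for the kubeadm waiter abstraction used above.
type Waiter interface {
	WaitForPodsWithLabel(label string) error
}

// dryRunWaiter never talks to a cluster; it only reports what it would wait for.
type dryRunWaiter struct{}

func (dryRunWaiter) WaitForPodsWithLabel(label string) error {
	fmt.Printf("[dryrun] would wait for pods with label %q\n", label)
	return nil
}

// clusterWaiter simulates a real waiter that would poll the API server.
type clusterWaiter struct{}

func (clusterWaiter) WaitForPodsWithLabel(label string) error {
	fmt.Printf("waiting for pods with label %q\n", label)
	return nil
}

// getWaiter mirrors the selection made in the hunk above.
func getWaiter(dryRun bool) Waiter {
	if dryRun {
		return dryRunWaiter{}
	}
	return clusterWaiter{}
}

func main() {
	_ = getWaiter(true).WaitForPodsWithLabel("component=kube-apiserver")
}
```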
@@ -30,7 +30,9 @@ import (
 "k8s.io/kubernetes/pkg/util/version"
 )

+// TODO: Maybe move these constants elsewhere in future releases
 var v190 = version.MustParseSemantic("v1.9.0")
+var v190alpha3 = version.MustParseSemantic("v1.9.0-alpha.3")
 var expiry = 180 * 24 * time.Hour

 // backupAPIServerCertAndKey backups the old cert and key of kube-apiserver to a specified directory.
@@ -107,6 +107,15 @@ func DeleteDaemonSetForeground(client clientset.Interface, namespace, name strin
 return client.AppsV1beta2().DaemonSets(namespace).Delete(name, deleteOptions)
 }

+// DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
+func DeleteDeploymentForeground(client clientset.Interface, namespace, name string) error {
+foregroundDelete := metav1.DeletePropagationForeground
+deleteOptions := &metav1.DeleteOptions{
+PropagationPolicy: &foregroundDelete,
+}
+return client.AppsV1beta2().Deployments(namespace).Delete(name, deleteOptions)
+}
+
 // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error {
 if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(role); err != nil {
@@ -80,7 +80,7 @@ func checkErr(prefix string, err error, handleErr func(string, int)) {
 func FormatErrMsg(errs []error) string {
 var errMsg string
 for _, err := range errs {
-errMsg = fmt.Sprintf("%s\t-%s\n", errMsg, err.Error())
+errMsg = fmt.Sprintf("%s\t- %s\n", errMsg, err.Error())
 }
 return errMsg
 }
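The only behavioural change above is a space after the dash in each rendered error line. A stand-alone copy of the new formatting loop, matching the expectations updated in the test hunk that follows:

```go
package main

import (
	"errors"
	"fmt"
)

// formatErrMsg is a local copy of the updated FormatErrMsg: each error is
// rendered as "\t- <msg>\n" (note the space after the dash).
func formatErrMsg(errs []error) string {
	var msg string
	for _, err := range errs {
		msg = fmt.Sprintf("%s\t- %s\n", msg, err.Error())
	}
	return msg
}

func main() {
	fmt.Print(formatErrMsg([]error{errors.New("first"), errors.New("second")}))
	// Output:
	//	- first
	//	- second
}
```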
@@ -64,13 +64,13 @@ func TestFormatErrMsg(t *testing.T) {
 fmt.Errorf(errMsg1),
 fmt.Errorf(errMsg2),
 },
-expect: "\t-" + errMsg1 + "\n" + "\t-" + errMsg2 + "\n",
+expect: "\t- " + errMsg1 + "\n" + "\t- " + errMsg2 + "\n",
 },
 {
 errs: []error{
 fmt.Errorf(errMsg1),
 },
-expect: "\t-" + errMsg1 + "\n",
+expect: "\t- " + errMsg1 + "\n",
 },
 }
|
@ -100,7 +100,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
|
|||||||
allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
|
allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
|
||||||
allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
|
allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
|
||||||
allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
|
allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
|
||||||
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
|
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
|
||||||
allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
|
allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
|
||||||
}
|
}
|
||||||
return allPlugins
|
return allPlugins
|
||||||
|
@@ -603,3 +603,7 @@ func (adc *attachDetachController) addNodeToDswp(node *v1.Node, nodeName types.N
 func (adc *attachDetachController) GetNodeLabels() (map[string]string, error) {
 return nil, fmt.Errorf("GetNodeLabels() unsupported in Attach/Detach controller")
 }
+
+func (adc *attachDetachController) GetNodeName() types.NodeName {
+return ""
+}

@@ -277,3 +277,7 @@ func (expc *expandController) GetConfigMapFunc() func(namespace, name string) (*
 func (expc *expandController) GetNodeLabels() (map[string]string, error) {
 return nil, fmt.Errorf("GetNodeLabels unsupported in expandController")
 }
+
+func (expc *expandController) GetNodeName() types.NodeName {
+return ""
+}

@@ -108,3 +108,7 @@ func (adc *PersistentVolumeController) GetExec(pluginName string) mount.Exec {
 func (ctrl *PersistentVolumeController) GetNodeLabels() (map[string]string, error) {
 return nil, fmt.Errorf("GetNodeLabels() unsupported in PersistentVolumeController")
 }
+
+func (ctrl *PersistentVolumeController) GetNodeName() types.NodeName {
+return ""
+}
@@ -188,6 +188,10 @@ func (kvh *kubeletVolumeHost) GetNodeLabels() (map[string]string, error) {
 return node.Labels, nil
 }

+func (kvh *kubeletVolumeHost) GetNodeName() types.NodeName {
+return kvh.kubelet.nodeName
+}
+
 func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec {
 exec, err := kvh.getMountExec(pluginName)
 if err != nil {
@@ -52,19 +52,20 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
 csiSource, err := getCSISourceFromSpec(spec)
 if err != nil {
 glog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err))
-return "", errors.New("missing CSI persistent volume")
+return "", err
 }

+node := string(nodeName)
 pvName := spec.PersistentVolume.GetName()
-attachID := getAttachmentName(csiSource.VolumeHandle, string(nodeName))
+attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node)

 attachment := &storage.VolumeAttachment{
 ObjectMeta: meta.ObjectMeta{
 Name: attachID,
 },
 Spec: storage.VolumeAttachmentSpec{
-NodeName: string(nodeName),
-Attacher: csiPluginName,
+NodeName: node,
+Attacher: csiSource.Driver,
 Source: storage.VolumeAttachmentSource{
 PersistentVolumeName: &pvName,
 },

@@ -72,7 +73,7 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
 Status: storage.VolumeAttachmentStatus{Attached: false},
 }

-attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
+_, err = c.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
 alreadyExist := false
 if err != nil {
 if !apierrs.IsAlreadyExists(err) {

@@ -83,19 +84,23 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
 }

 if alreadyExist {
-glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attach.GetName(), csiSource.VolumeHandle))
+glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle))
 } else {
-glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully, will start probing for updates", attach.GetName(), csiSource.VolumeHandle))
+glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
 }

 // probe for attachment update here
 // NOTE: any error from waiting for attachment is logged only. This is because
 // the primariy intent of the enclosing method is to create VolumeAttachment.
 // DONOT return that error here as it is mitigated in attacher.WaitForAttach.
+volAttachmentOK := true
 if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil {
-glog.Error(log("attacher.Attach encountered error during attachment probing: %v", err))
+volAttachmentOK = false
+glog.Error(log("attacher.Attach attempted to wait for attachment to be ready, but failed with: %v", err))
 }

+glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment verified=%t: attachment object [%s]", volAttachmentOK, attachID))
+
 return attachID, nil
 }
@@ -151,7 +156,7 @@ func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, tim
 }

 func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
-glog.V(4).Info(log("probing attachment status for %d volumes ", len(specs)))
+glog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs)))

 attached := make(map[*volume.Spec]bool)

@@ -165,13 +170,15 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No
 glog.Error(log("attacher.VolumesAreAttached failed: %v", err))
 continue
 }
-attachID := getAttachmentName(source.VolumeHandle, string(nodeName))
+
+attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName))
 glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
 attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
 if err != nil {
 glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
 continue
 }
+glog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached))
 attached[spec] = attach.Status.Attached
 }
@@ -201,10 +208,11 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
 glog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
 return errors.New("volumeName missing expected data")
 }
+
+driverName := parts[0]
 volID := parts[1]
-attachID := getAttachmentName(volID, string(nodeName))
-err := c.k8s.StorageV1alpha1().VolumeAttachments().Delete(attachID, nil)
-if err != nil {
+attachID := getAttachmentName(volID, driverName, string(nodeName))
+if err := c.k8s.StorageV1alpha1().VolumeAttachments().Delete(attachID, nil); err != nil {
 glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
 return err
 }
@@ -257,12 +265,8 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
 	return nil
 }

-func hashAttachmentName(volName, nodeName string) string {
-	result := sha256.Sum256([]byte(fmt.Sprintf("%s%s", volName, nodeName)))
-	return fmt.Sprintf("%x", result)
-}
-
-func getAttachmentName(volName, nodeName string) string {
-	// TODO consider using a different prefix for attachment
-	return fmt.Sprintf("pv-%s", hashAttachmentName(volName, nodeName))
-}
+// getAttachmentName returns csi-<sha256(volName, csiDriverName, nodeName)>
+func getAttachmentName(volName, csiDriverName, nodeName string) string {
+	result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
+	return fmt.Sprintf("csi-%x", result)
+}
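A minimal standalone sketch of the naming scheme introduced above, using only the standard library: the VolumeAttachment object name is a SHA-256 over volume handle, driver name, and node name, prefixed with "csi-". The helper name here is illustrative; in the commit it is the unexported getAttachmentName.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// attachmentName mirrors the scheme in the hunk above: a stable,
// collision-resistant object name derived from (volume, driver, node).
func attachmentName(volName, csiDriverName, nodeName string) string {
	sum := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
	return fmt.Sprintf("csi-%x", sum)
}

func main() {
	// Including the driver name means two drivers exposing the same volume
	// handle on the same node no longer collide on one VolumeAttachment name.
	fmt.Println(attachmentName("vol-0123", "com.example.csi-driver", "node-1"))
}
```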
@@ -17,13 +17,11 @@ limitations under the License.
 package csi

 import (
-	"crypto/sha256"
 	"fmt"
 	"os"
 	"testing"
 	"time"

-	"k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1alpha1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -38,7 +36,7 @@ func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttach
 		},
 		Spec: storage.VolumeAttachmentSpec{
 			NodeName: nodeName,
-			Attacher: csiPluginName,
+			Attacher: "mock",
 			Source: storage.VolumeAttachmentSource{
 				PersistentVolumeName: &pvName,
 			},
@@ -64,47 +62,93 @@ func TestAttacherAttach(t *testing.T) {

 	testCases := []struct {
 		name       string
-		pv         *v1.PersistentVolume
 		nodeName   string
-		attachHash [32]byte
+		driverName string
+		volumeName string
+		attachID   string
 		shouldFail bool
 	}{
 		{
 			name:       "test ok 1",
-			pv:         makeTestPV("test-pv-001", 10, testDriver, "test-vol-1"),
-			nodeName:   "test-node",
-			attachHash: sha256.Sum256([]byte(fmt.Sprintf("%s%s", "test-vol-1", "test-node"))),
+			nodeName:   "testnode-01",
+			driverName: "testdriver-01",
+			volumeName: "testvol-01",
+			attachID:   getAttachmentName("testvol-01", "testdriver-01", "testnode-01"),
 		},
 		{
 			name:       "test ok 2",
-			pv:         makeTestPV("test-pv-002", 10, testDriver, "test-vol-002"),
-			nodeName:   "test-node",
-			attachHash: sha256.Sum256([]byte(fmt.Sprintf("%s%s", "test-vol-002", "test-node"))),
+			nodeName:   "node02",
+			driverName: "driver02",
+			volumeName: "vol02",
+			attachID:   getAttachmentName("vol02", "driver02", "node02"),
 		},
 		{
-			name:       "missing spec",
-			pv:         nil,
-			nodeName:   "test-node",
-			attachHash: sha256.Sum256([]byte(fmt.Sprintf("%s%s", "test-vol-3", "test-node"))),
+			name:       "mismatch vol",
+			nodeName:   "node02",
+			driverName: "driver02",
+			volumeName: "vol01",
+			attachID:   getAttachmentName("vol02", "driver02", "node02"),
+			shouldFail: true,
+		},
+		{
+			name:       "mismatch driver",
+			nodeName:   "node02",
+			driverName: "driver000",
+			volumeName: "vol02",
+			attachID:   getAttachmentName("vol02", "driver02", "node02"),
+			shouldFail: true,
+		},
+		{
+			name:       "mismatch node",
+			nodeName:   "node000",
+			driverName: "driver000",
+			volumeName: "vol02",
+			attachID:   getAttachmentName("vol02", "driver02", "node02"),
 			shouldFail: true,
 		},
 	}

-	for _, tc := range testCases {
-		var spec *volume.Spec
-		if tc.pv != nil {
-			spec = volume.NewSpecFromPersistentVolume(tc.pv, tc.pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
-		}
-
-		attachID, err := csiAttacher.Attach(spec, types.NodeName(tc.nodeName))
-		if tc.shouldFail && err == nil {
-			t.Error("expected failure, but got nil err")
-		}
-		if attachID != "" {
-			expectedID := fmt.Sprintf("pv-%x", tc.attachHash)
-			if attachID != expectedID {
-				t.Errorf("expecting attachID %v, got %v", expectedID, attachID)
-			}
-		}
-	}
-}
+	// attacher loop
+	for i, tc := range testCases {
+		t.Log("test case: ", tc.name)
+		spec := volume.NewSpecFromPersistentVolume(makeTestPV(fmt.Sprintf("test-pv%d", i), 10, tc.driverName, tc.volumeName), false)
+
+		go func(id, nodename string, fail bool) {
+			attachID, err := csiAttacher.Attach(spec, types.NodeName(nodename))
+			if !fail && err != nil {
+				t.Error("was not expecting failure, but got err: ", err)
+			}
+			if attachID != id && !fail {
+				t.Errorf("expecting attachID %v, got %v", id, attachID)
+			}
+		}(tc.attachID, tc.nodeName, tc.shouldFail)
+
+		// update attachment to avoid long waitForAttachment
+		ticker := time.NewTicker(10 * time.Millisecond)
+		defer ticker.Stop()
+		// wait for attachment to be saved
+		var attach *storage.VolumeAttachment
+		for i := 0; i < 100; i++ {
+			attach, err = csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
+			if err != nil {
+				if apierrs.IsNotFound(err) {
+					<-ticker.C
+					continue
+				}
+				t.Error(err)
+			}
+			if attach != nil {
+				break
+			}
+		}
+
+		if attach == nil {
+			t.Error("attachment not found")
+		}
+		attach.Status.Attached = true
+		_, err = csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Update(attach)
+		if err != nil {
+			t.Error(err)
+		}
+	}
+}
@@ -136,8 +180,8 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
 	for i, tc := range testCases {
 		t.Logf("running test: %v", tc.name)
 		pvName := fmt.Sprintf("test-pv-%d", i)
-		attachID := fmt.Sprintf("pv-%s", hashAttachmentName(pvName, nodeName))
+		volID := fmt.Sprintf("test-vol-%d", i)
+		attachID := getAttachmentName(volID, testDriver, nodeName)
 		attachment := makeTestAttachment(attachID, nodeName, pvName)
 		attachment.Status.Attached = tc.attached
 		attachment.Status.AttachError = tc.attachErr
@@ -150,7 +194,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
 			}
 		}()

-		retID, err := csiAttacher.waitForVolumeAttachment("test-vol", attachID, tc.timeout)
+		retID, err := csiAttacher.waitForVolumeAttachment(volID, attachID, tc.timeout)
 		if tc.shouldFail && err == nil {
 			t.Error("expecting failure, but err is nil")
 		}
@@ -192,7 +236,7 @@ func TestAttacherVolumesAreAttached(t *testing.T) {
 		pv := makeTestPV("test-pv", 10, testDriver, volName)
 		spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
 		specs = append(specs, spec)
-		attachID := getAttachmentName(volName, nodeName)
+		attachID := getAttachmentName(volName, testDriver, nodeName)
 		attachment := makeTestAttachment(attachID, nodeName, pv.GetName())
 		attachment.Status.Attached = stat
 		_, err := csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
@@ -239,9 +283,9 @@ func TestAttacherDetach(t *testing.T) {
 		attachID   string
 		shouldFail bool
 	}{
-		{name: "normal test", volID: "vol-001", attachID: fmt.Sprintf("pv-%s", hashAttachmentName("vol-001", nodeName))},
-		{name: "normal test 2", volID: "vol-002", attachID: fmt.Sprintf("pv-%s", hashAttachmentName("vol-002", nodeName))},
-		{name: "object not found", volID: "vol-001", attachID: fmt.Sprintf("pv-%s", hashAttachmentName("vol-002", nodeName)), shouldFail: true},
+		{name: "normal test", volID: "vol-001", attachID: getAttachmentName("vol-001", testDriver, nodeName)},
+		{name: "normal test 2", volID: "vol-002", attachID: getAttachmentName("vol-002", testDriver, nodeName)},
+		{name: "object not found", volID: "vol-001", attachID: getAttachmentName("vol-002", testDriver, nodeName), shouldFail: true},
 	}

 	for _, tc := range testCases {
@@ -39,6 +39,7 @@ type csiClient interface {
 		targetPath string,
 		accessMode api.PersistentVolumeAccessMode,
 		volumeInfo map[string]string,
+		volumeAttribs map[string]string,
 		fsType string,
 	) error
 	NodeUnpublishVolume(ctx grpctx.Context, volID string, targetPath string) error
@@ -141,6 +142,7 @@ func (c *csiDriverClient) NodePublishVolume(
 	targetPath string,
 	accessMode api.PersistentVolumeAccessMode,
 	volumeInfo map[string]string,
+	volumeAttribs map[string]string,
 	fsType string,
 ) error {

@@ -161,6 +163,7 @@ func (c *csiDriverClient) NodePublishVolume(
 		TargetPath:        targetPath,
 		Readonly:          readOnly,
 		PublishVolumeInfo: volumeInfo,
+		VolumeAttributes:  volumeAttribs,

 		VolumeCapability: &csipb.VolumeCapability{
 			AccessMode: &csipb.VolumeCapability_AccessMode{
@@ -90,6 +90,7 @@ func TestClientNodePublishVolume(t *testing.T) {
 			tc.targetPath,
 			api.ReadWriteOnce,
 			map[string]string{"device": "/dev/null"},
+			map[string]string{"attr0": "val0"},
 			tc.fsType,
 		)

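To make the plumbing visible without the generated CSI protobuf types, here is a hedged sketch of how a caller threads the new volumeAttribs map through a NodePublishVolume-style call alongside the attach-time volumeInfo. The interface and type names below are stand-ins for illustration, not the real csiClient API.

```go
package main

import "fmt"

// nodePublisher is a trimmed-down stand-in for the csiClient interface in the
// hunk above; only the parameters relevant to this change are kept.
type nodePublisher interface {
	NodePublishVolume(volID, targetPath string, volumeInfo, volumeAttribs map[string]string, fsType string) error
}

type fakePublisher struct{}

func (fakePublisher) NodePublishVolume(volID, targetPath string, volumeInfo, volumeAttribs map[string]string, fsType string) error {
	// The attach-time metadata (volumeInfo) and the PV-declared attributes
	// (volumeAttribs) travel as two separate maps, mirroring the new
	// VolumeAttributes field on the CSI NodePublishVolume request.
	fmt.Println(volID, targetPath, volumeInfo, volumeAttribs, fsType)
	return nil
}

func main() {
	var p nodePublisher = fakePublisher{}
	_ = p.NodePublishVolume(
		"vol-0123",
		"/var/lib/kubelet/pods/uid/volumes/kubernetes.io~csi/pv/mount",
		map[string]string{"device": "/dev/null"},
		map[string]string{"attr0": "val0"},
		"ext4",
	)
}
```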
@@ -17,6 +17,7 @@ limitations under the License.
 package csi

 import (
+	"encoding/json"
 	"errors"
 	"fmt"
 	"path"
@@ -24,7 +25,6 @@ import (
 	"github.com/golang/glog"
 	grpctx "golang.org/x/net/context"
 	api "k8s.io/api/core/v1"
-	storage "k8s.io/api/storage/v1alpha1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
@@ -77,11 +77,18 @@ func (c *csiMountMgr) SetUp(fsGroup *int64) error {
 func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
 	glog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))

+	csiSource, err := getCSISourceFromSpec(c.spec)
+	if err != nil {
+		glog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
+		return err
+	}
+
 	ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
 	defer cancel()

 	csi := c.csiClient
-	pvName := c.spec.PersistentVolume.GetName()
+	nodeName := string(c.plugin.host.GetNodeName())
+	attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)

 	// ensure version is supported
 	if err := csi.AssertSupportedVersion(ctx, csiVersion); err != nil {
@@ -92,48 +99,47 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
 	// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
 	if c.volumeInfo == nil {
-		//TODO (vladimirvivien) consider using VolumesAttachments().Get() to retrieve
-		//the object directly. This requires the ability to reconstruct the ID using volumeName+nodeName (nodename may not be avilable)
-		attachList, err := c.k8s.StorageV1alpha1().VolumeAttachments().List(meta.ListOptions{})
+		attachment, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
 		if err != nil {
-			glog.Error(log("failed to get volume attachments: %v", err))
+			glog.Error(log("mounter.SetupAt failed while getting volume attachment [id=%v]: %v", attachID, err))
 			return err
 		}

-		var attachment *storage.VolumeAttachment
-		for _, attach := range attachList.Items {
-			if attach.Spec.Source.PersistentVolumeName != nil &&
-				*attach.Spec.Source.PersistentVolumeName == pvName {
-				attachment = &attach
-				break
-			}
-		}
-
 		if attachment == nil {
-			glog.Error(log("unable to find VolumeAttachment with PV.name = %s", pvName))
+			glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
 			return errors.New("no existing VolumeAttachment found")
 		}
 		c.volumeInfo = attachment.Status.AttachmentMetadata
 	}

+	// get volume attributes
+	// TODO: for alpha, vol attributes are passed via PV.Annotations
+	// Beta will fix that
+	attribs, err := getVolAttribsFromSpec(c.spec)
+	if err != nil {
+		glog.Error(log("mounter.SetUpAt failed to extract volume attributes from PV annotations: %v", err))
+		return err
+	}
+
 	//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
 	accessMode := api.ReadWriteOnce
 	if c.spec.PersistentVolume.Spec.AccessModes != nil {
 		accessMode = c.spec.PersistentVolume.Spec.AccessModes[0]
 	}

-	err := csi.NodePublishVolume(
+	err = csi.NodePublishVolume(
 		ctx,
 		c.volumeID,
 		c.readOnly,
 		dir,
 		accessMode,
 		c.volumeInfo,
+		attribs,
 		"ext4", //TODO needs to be sourced from PV or somewhere else
 	)

 	if err != nil {
-		glog.Errorf(log("Mounter.Setup failed: %v", err))
+		glog.Errorf(log("Mounter.SetupAt failed: %v", err))
 		return err
 	}
 	glog.V(4).Infof(log("successfully mounted %s", dir))
@@ -192,3 +198,26 @@ func (c *csiMountMgr) TearDownAt(dir string) error {

 	return nil
 }
+
+// getVolAttribsFromSpec extracts CSI VolumeAttributes information from PV.Annotations
+// using key csi.volume.kubernetes.io/volume-attributes. The annotation value is expected
+// to be a JSON-encoded object of form {"key0":"val0",...,"keyN":"valN"}
+func getVolAttribsFromSpec(spec *volume.Spec) (map[string]string, error) {
+	if spec == nil {
+		return nil, errors.New("missing volume spec")
+	}
+	annotations := spec.PersistentVolume.GetAnnotations()
+	if annotations == nil {
+		return nil, nil // no annotations found
+	}
+	jsonAttribs := annotations[csiVolAttribsAnnotationKey]
+	if jsonAttribs == "" {
+		return nil, nil // csi annotation not found
+	}
+	attribs := map[string]string{}
+	if err := json.Unmarshal([]byte(jsonAttribs), &attribs); err != nil {
+		glog.Error(log("error parsing csi PV.Annotation [%s]=%s: %v", csiVolAttribsAnnotationKey, jsonAttribs, err))
+		return nil, err
+	}
+	return attribs, nil
+}
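A small illustrative round trip of the alpha annotation format consumed by getVolAttribsFromSpec: a JSON object of string keys and values stored under the csi.volume.kubernetes.io/volume-attributes key. The attribute names used here (fsType, pool) are made up for the example.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Annotation key as defined by csiVolAttribsAnnotationKey in this diff.
const volAttribsAnnotation = "csi.volume.kubernetes.io/volume-attributes"

func main() {
	// What a provisioner (or an admin) would put on the PV for the alpha flow.
	attribs := map[string]string{"fsType": "xfs", "pool": "ssd"}
	raw, _ := json.Marshal(attribs)
	annotations := map[string]string{volAttribsAnnotation: string(raw)}

	// What getVolAttribsFromSpec does on the mount path: read the annotation
	// back into a map[string]string and hand it to NodePublishVolume.
	parsed := map[string]string{}
	if err := json.Unmarshal([]byte(annotations[volAttribsAnnotation]), &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed["fsType"], parsed["pool"]) // xfs ssd
}
```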
@@ -26,8 +26,10 @@ import (
 	storage "k8s.io/api/storage/v1alpha1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	fakeclient "k8s.io/client-go/kubernetes/fake"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/csi/fake"
+	volumetest "k8s.io/kubernetes/pkg/volume/testing"
 )

 var (
@@ -68,7 +70,14 @@ func TestMounterGetPath(t *testing.T) {
 func TestMounterSetUp(t *testing.T) {
 	plug, tmpDir := newTestPlugin(t)
 	defer os.RemoveAll(tmpDir)
+	fakeClient := fakeclient.NewSimpleClientset()
+	host := volumetest.NewFakeVolumeHostWithNodeName(
+		tmpDir,
+		fakeClient,
+		nil,
+		"fakeNode",
+	)
+	plug.host = host
 	pv := makeTestPV("test-pv", 10, testDriver, testVol)
 	pvName := pv.GetName()

@@ -88,9 +97,11 @@ func TestMounterSetUp(t *testing.T) {
 	csiMounter := mounter.(*csiMountMgr)
 	csiMounter.csiClient = setupClient(t)

+	attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))

 	attachment := &storage.VolumeAttachment{
 		ObjectMeta: meta.ObjectMeta{
-			Name: "pv-1234556775313",
+			Name: attachID,
 		},
 		Spec: storage.VolumeAttachmentSpec{
 			NodeName: "test-node",
@@ -150,3 +161,50 @@ func TestUnmounterTeardown(t *testing.T) {
 	}

 }
+
+func TestGetVolAttribsFromSpec(t *testing.T) {
+	testCases := []struct {
+		name        string
+		annotations map[string]string
+		attribs     map[string]string
+		shouldFail  bool
+	}{
+		{
+			name:        "attribs ok",
+			annotations: map[string]string{"key0": "val0", csiVolAttribsAnnotationKey: `{"k0":"attr0","k1":"attr1","k2":"attr2"}`, "keyN": "valN"},
+			attribs:     map[string]string{"k0": "attr0", "k1": "attr1", "k2": "attr2"},
+		},
+
+		{
+			name:        "missing attribs",
+			annotations: map[string]string{"key0": "val0", "keyN": "valN"},
+		},
+		{
+			name: "missing annotations",
+		},
+		{
+			name:        "bad json",
+			annotations: map[string]string{"key0": "val0", csiVolAttribsAnnotationKey: `{"k0""attr0","k1":"attr1,"k2":"attr2"`, "keyN": "valN"},
+			attribs:     map[string]string{"k0": "attr0", "k1": "attr1", "k2": "attr2"},
+			shouldFail:  true,
+		},
+	}
+	spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, testDriver, testVol), false)
+	for _, tc := range testCases {
+		t.Log("test case:", tc.name)
+		spec.PersistentVolume.Annotations = tc.annotations
+		attribs, err := getVolAttribsFromSpec(spec)
+		if !tc.shouldFail && err != nil {
+			t.Error("test case should not fail, but err != nil", err)
+		}
+		eq := true
+		for k, v := range attribs {
+			if tc.attribs[k] != v {
+				eq = false
+			}
+		}
+		if !eq {
+			t.Errorf("expecting attribs %#v, but got %#v", tc.attribs, attribs)
+		}
+	}
+}
@@ -34,14 +34,14 @@ import (
 )

 const (
-	csiName       = "csi"
 	csiPluginName = "kubernetes.io/csi"
+	csiVolAttribsAnnotationKey = "csi.volume.kubernetes.io/volume-attributes"

 	// TODO (vladimirvivien) implement a more dynamic way to discover
 	// the unix domain socket path for each installed csi driver.
 	// TODO (vladimirvivien) would be nice to name socket with a .sock extension
 	// for consistency.
-	csiAddrTemplate = "/var/lib/kubelet/plugins/%v"
+	csiAddrTemplate = "/var/lib/kubelet/plugins/%v/csi.sock"
 	csiTimeout      = 15 * time.Second
 	volNameSep      = "^"
 )
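A tiny sketch of what the csiAddrTemplate change means in practice: the template now resolves to the driver's UNIX socket path rather than just its plugin directory. The driver name below is a placeholder.

```go
package main

import "fmt"

func main() {
	// Before this change the template produced only the plugin directory;
	// with the "/csi.sock" suffix it yields the full socket path the kubelet
	// dials for a given driver name.
	const csiAddrTemplate = "/var/lib/kubelet/plugins/%v/csi.sock"
	fmt.Printf(csiAddrTemplate+"\n", "com.example.csi-driver")
	// /var/lib/kubelet/plugins/com.example.csi-driver/csi.sock
}
```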
@@ -303,6 +303,9 @@ type VolumeHost interface {

 	// Returns the labels on the node
 	GetNodeLabels() (map[string]string, error)
+
+	// Returns the name of the node
+	GetNodeName() types.NodeName
 }

 // VolumePluginMgr tracks registered plugins.
@@ -53,6 +53,7 @@ type fakeVolumeHost struct {
 	exec       mount.Exec
 	writer     io.Writer
 	nodeLabels map[string]string
+	nodeName   string
 }

 func NewFakeVolumeHost(rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) *fakeVolumeHost {
@@ -69,6 +70,12 @@ func NewFakeVolumeHostWithNodeLabels(rootDir string, kubeClient clientset.Interf
 	return volHost
 }

+func NewFakeVolumeHostWithNodeName(rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, nodeName string) *fakeVolumeHost {
+	volHost := newFakeVolumeHost(rootDir, kubeClient, plugins, nil)
+	volHost.nodeName = nodeName
+	return volHost
+}
+
 func newFakeVolumeHost(rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface) *fakeVolumeHost {
 	host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: cloud}
 	host.mounter = &mount.FakeMounter{}
@@ -177,6 +184,10 @@ func (f *fakeVolumeHost) GetNodeLabels() (map[string]string, error) {
 	return f.nodeLabels, nil
 }

+func (f *fakeVolumeHost) GetNodeName() types.NodeName {
+	return types.NodeName(f.nodeName)
+}
+
 func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
 	if _, ok := config.OtherAttributes["fake-property"]; ok {
 		return []VolumePlugin{
@@ -58,16 +58,25 @@ func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) {
 	// controllerRoleBindings is a slice of roles used for controllers
 	controllerRoleBindings := []rbac.ClusterRoleBinding{}

-	addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
-		ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"},
-		Rules: []rbac.PolicyRule{
-			rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(),
-			rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
-			rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
-			rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
-			eventsRule(),
-		},
-	})
+	addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbac.ClusterRole {
+		role := rbac.ClusterRole{
+			ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"},
+			Rules: []rbac.PolicyRule{
+				rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(),
+				rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+				rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
+				rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+				eventsRule(),
+			},
+		}
+
+		if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
+			role.Rules = append(role.Rules, rbac.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie())
+		}
+
+		return role
+	}())

 	addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
 		ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "clusterrole-aggregation-controller"},
 		Rules: []rbac.PolicyRule{
@@ -143,6 +143,12 @@ func NodeRules() []rbac.PolicyRule {
 		pvcStatusPolicyRule := rbac.NewRule("get", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie()
 		nodePolicyRules = append(nodePolicyRules, pvcStatusPolicyRule)
 	}

+	// CSI
+	if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
+		volAttachRule := rbac.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie()
+		nodePolicyRules = append(nodePolicyRules, volAttachRule)
+	}
 	return nodePolicyRules
 }

@@ -120,7 +120,7 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
 	node, err := c.Get(id)

 	if apierrors.IsNotFound(err) {
-		return nil, fmt.Errorf("node '%v' not found", id)
+		return nil, err
 	}

 	if err != nil {
@@ -1214,6 +1214,10 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods [
 	if affinity != nil && affinity.PodAntiAffinity != nil {
 		existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
 		if err != nil {
+			if apierrors.IsNotFound(err) {
+				glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
+				continue
+			}
 			return nil, err
 		}
 		existingPodMatchingTerms, err := getMatchingAntiAffinityTermsOfExistingPod(pod, existingPod, existingPodNode)
@@ -36,6 +36,7 @@ go_library(
 		"//plugin/pkg/scheduler/schedulercache:go_default_library",
 		"//vendor/github.com/golang/glog:go_default_library",
 		"//vendor/k8s.io/api/core/v1:go_default_library",
+		"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
 		"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
 		"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 		"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -21,6 +21,7 @@ import (
 	"sync"

 	"k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/util/workqueue"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -137,6 +138,10 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 	processPod := func(existingPod *v1.Pod) error {
 		existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName)
 		if err != nil {
+			if apierrors.IsNotFound(err) {
+				glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
+				return nil
+			}
 			return err
 		}
 		existingPodAffinity := existingPod.Spec.Affinity
@@ -189,19 +194,21 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 	}
 	processNode := func(i int) {
 		nodeInfo := nodeNameToInfo[allNodeNames[i]]
-		if hasAffinityConstraints || hasAntiAffinityConstraints {
-			// We need to process all the nodes.
-			for _, existingPod := range nodeInfo.Pods() {
-				if err := processPod(existingPod); err != nil {
-					pm.setError(err)
-				}
-			}
-		} else {
-			// The pod doesn't have any constraints - we need to check only existing
-			// ones that have some.
-			for _, existingPod := range nodeInfo.PodsWithAffinity() {
-				if err := processPod(existingPod); err != nil {
-					pm.setError(err)
-				}
-			}
-		}
+		if nodeInfo.Node() != nil {
+			if hasAffinityConstraints || hasAntiAffinityConstraints {
+				// We need to process all the nodes.
+				for _, existingPod := range nodeInfo.Pods() {
+					if err := processPod(existingPod); err != nil {
+						pm.setError(err)
+					}
+				}
+			} else {
+				// The pod doesn't have any constraints - we need to check only existing
+				// ones that have some.
+				for _, existingPod := range nodeInfo.PodsWithAffinity() {
+					if err := processPod(existingPod); err != nil {
+						pm.setError(err)
+					}
+				}
+			}
+		}
 	}
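The scheduler hunks above all follow the same pattern: return the API error unwrapped so callers can branch on apierrors.IsNotFound and simply skip pods whose node has been deleted mid-scheduling. A minimal, self-contained illustration of that check, with a synthesized NotFound error standing in for the real node lookup:

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Simulate the lookup failing because the node object is gone.
	err := apierrors.NewNotFound(schema.GroupResource{Resource: "nodes"}, "node-1")

	// Because GetNodeInfo now returns the API error as-is instead of a
	// wrapped fmt.Errorf, callers can branch on the error type, as the
	// predicate and priority hunks above do.
	if apierrors.IsNotFound(err) {
		fmt.Println("node deleted, skipping pod on", "node-1")
		return
	}
	fmt.Println("unexpected error:", err)
}
```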