Add basic install and mount flexvolumes e2e tests
@@ -342,6 +342,12 @@ func SkipUnlessProviderIs(supportedProviders ...string) {
     }
 }
 
+func SkipUnlessMasterOSDistroIs(supportedMasterOsDistros ...string) {
+    if !MasterOSDistroIs(supportedMasterOsDistros...) {
+        Skipf("Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, TestContext.MasterOSDistro)
+    }
+}
+
 func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
     if !NodeOSDistroIs(supportedNodeOsDistros...) {
         Skipf("Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro)
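
The new SkipUnlessMasterOSDistroIs mirrors the existing SkipUnlessNodeOSDistroIs, so a spec can gate on both the master and node images before doing any work. A minimal sketch of the intended guard pattern, using only helpers named in this diff (the ordering and the gce/gci/debian values are illustrative, not part of this commit):

    // Hypothetical guards at the top of a flexvolume spec. Skipf aborts
    // the current spec, so nothing below these lines runs on an
    // unsupported provider or image.
    SkipUnlessProviderIs("gce")
    SkipUnlessMasterOSDistroIs("gci")
    SkipUnlessNodeOSDistroIs("debian", "gci")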
@@ -383,6 +389,15 @@ func ProviderIs(providers ...string) bool {
     return false
 }
 
+func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
+    for _, distro := range supportedMasterOsDistros {
+        if strings.ToLower(distro) == strings.ToLower(TestContext.MasterOSDistro) {
+            return true
+        }
+    }
+    return false
+}
+
 func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
     for _, distro := range supportedNodeOsDistros {
         if strings.ToLower(distro) == strings.ToLower(TestContext.NodeOSDistro) {
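
The new predicate lower-cases each candidate and the configured distro on every iteration. strings.EqualFold is the usual allocation-free way to spell this comparison for ASCII names like these; a self-contained sketch, with distroIs as a hypothetical stand-in for the two helpers:

    package main

    import (
        "fmt"
        "strings"
    )

    // distroIs mirrors MasterOSDistroIs/NodeOSDistroIs: report whether
    // current matches any supported distro name, ignoring case.
    func distroIs(current string, supported ...string) bool {
        for _, distro := range supported {
            if strings.EqualFold(distro, current) {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(distroIs("GCI", "debian", "gci"))    // true
        fmt.Println(distroIs("ubuntu", "debian", "gci")) // false
    }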
@@ -3758,6 +3773,43 @@ func RestartKubeProxy(host string) error {
     return nil
 }
 
+func RestartKubelet(host string) error {
+    // TODO: Make it work for all providers and distros.
+    if !ProviderIs("gce", "aws") {
+        return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
+    }
+    if ProviderIs("gce") && !NodeOSDistroIs("debian", "gci") {
+        return fmt.Errorf("unsupported node OS distro: %s", TestContext.NodeOSDistro)
+    }
+    var cmd string
+    if ProviderIs("gce") && NodeOSDistroIs("debian") {
+        cmd = "sudo /etc/init.d/kubelet restart"
+    } else {
+        cmd = "sudo systemctl restart kubelet"
+    }
+    Logf("Restarting kubelet via ssh, running: %v", cmd)
+    result, err := SSH(cmd, host, TestContext.Provider)
+    if err != nil || result.Code != 0 {
+        LogSSHResult(result)
+        return fmt.Errorf("couldn't restart kubelet: %v", err)
+    }
+    return nil
+}
+
+func WaitForKubeletUp(host string) error {
+    cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz"
+    for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
+        result, err := SSH(cmd, host, TestContext.Provider)
+        if err != nil || result.Code != 0 {
+            LogSSHResult(result)
+        }
+        if result.Stdout == "ok" {
+            return nil
+        }
+    }
+    return fmt.Errorf("waiting for kubelet timed out")
+}
+
 func RestartApiserver(c discovery.ServerVersionInterface) error {
     // TODO: Make it work for all providers.
     if !ProviderIs("gce", "gke", "aws") {
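
RestartKubelet is meant to be followed by WaitForKubeletUp, which polls the kubelet's read-only /healthz over SSH for up to a minute. The loop shape is worth noting: the probe runs once per 5-second tick and a failing probe is only logged, not fatal. A self-contained sketch of the same wait pattern, with probe standing in for the SSH'd curl (all names here are hypothetical):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // pollUntilOK mirrors WaitForKubeletUp's loop: run probe every
    // interval until it returns "ok" or timeout elapses; probe failures
    // are tolerated and retried.
    func pollUntilOK(probe func() (string, error), timeout, interval time.Duration) error {
        for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
            out, err := probe()
            if err != nil {
                continue // the framework logs the SSH result and retries
            }
            if out == "ok" {
                return nil
            }
        }
        return errors.New("waiting for healthz timed out")
    }

    func main() {
        attempts := 0
        err := pollUntilOK(func() (string, error) {
            attempts++
            if attempts < 3 {
                return "", errors.New("connection refused") // still restarting
            }
            return "ok", nil
        }, time.Minute, 10*time.Millisecond)
        fmt.Printf("err=%v after %d attempts\n", err, attempts)
    }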
@@ -3806,6 +3858,38 @@ func WaitForApiserverUp(c clientset.Interface) error {
     return fmt.Errorf("waiting for apiserver timed out")
 }
 
+func RestartControllerManager() error {
+    // TODO: Make it work for all providers and distros.
+    if !ProviderIs("gce", "aws") {
+        return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
+    }
+    if ProviderIs("gce") && !MasterOSDistroIs("gci") {
+        return fmt.Errorf("unsupported master OS distro: %s", TestContext.MasterOSDistro)
+    }
+    cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1 | xargs sudo docker kill"
+    Logf("Restarting controller-manager via ssh, running: %v", cmd)
+    result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
+    if err != nil || result.Code != 0 {
+        LogSSHResult(result)
+        return fmt.Errorf("couldn't restart controller-manager: %v", err)
+    }
+    return nil
+}
+
+func WaitForControllerManagerUp() error {
+    cmd := "curl http://localhost:" + strconv.Itoa(ports.ControllerManagerPort) + "/healthz"
+    for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
+        result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
+        if err != nil || result.Code != 0 {
+            LogSSHResult(result)
+        }
+        if result.Stdout == "ok" {
+            return nil
+        }
+    }
+    return fmt.Errorf("waiting for controller-manager timed out")
+}
+
 // WaitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it.
 // By cluster size we mean number of Nodes excluding Master Node.
 func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration) error {
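
Unlike RestartKubelet, RestartControllerManager takes no host argument: it always SSHes to the master and docker-kills the kube-controller-manager container, relying on the kubelet to recreate the static pod; WaitForControllerManagerUp then polls the local healthz port until that happens. A hedged sketch of the intended pairing inside a test (ExpectNoError is the framework's usual assertion helper; the surrounding spec is assumed):

    // Hypothetical e2e step: kill the container, then wait for the
    // kubelet to bring the static pod back and report healthy.
    ExpectNoError(RestartControllerManager())
    ExpectNoError(WaitForControllerManagerUp())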
@@ -96,8 +96,10 @@ type VolumeTestConfig struct {
     // Wait for the pod to terminate successfully
     // False indicates that the pod is long running
     WaitForCompletion bool
-    // NodeName to run pod on. Default is any node.
-    NodeName string
+    // ServerNodeName is the spec.nodeName to run server pod on. Default is any node.
+    ServerNodeName string
+    // ClientNodeName is the spec.nodeName to run client pod on. Default is any node.
+    ClientNodeName string
 }
 
 // VolumeTest contains a volume to mount into a client pod and its
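
Splitting the single NodeName into ServerNodeName and ClientNodeName lets a test pin the server and client pods independently, which flexvolume tests need since the driver is only installed on particular nodes. A sketch of a config that pins both pods to the same node (Namespace and Prefix are assumed to be the struct's pre-existing fields; the values are illustrative):

    // Hypothetical flexvolume-style setup: the driver was installed on
    // node, so both the exporting and the mounting pod must land there.
    config := VolumeTestConfig{
        Namespace:      ns.Name,
        Prefix:         "flex",
        ServerNodeName: node.Name, // pod exporting the volume
        ClientNodeName: node.Name, // pod mounting the volume
    }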
@@ -283,7 +285,7 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.
             },
             Volumes:       volumes,
             RestartPolicy: restartPolicy,
-            NodeName:      config.NodeName,
+            NodeName:      config.ServerNodeName,
         },
     }
 
@@ -388,7 +390,8 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro
                     Level: "s0:c0,c1",
                 },
             },
-            Volumes: []v1.Volume{},
+            Volumes:  []v1.Volume{},
+            NodeName: config.ClientNodeName,
         },
     }
     podsNamespacer := client.CoreV1().Pods(config.Namespace)