Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-26 04:36:00 +00:00)
Update test/e2e for test/e2e/framework refactoring
Changed file: test/e2e/pods.go (281 changed lines)
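The refactoring is mechanical: helpers that previously lived in the e2e package itself (expectNoError, Logf, Failf, loadConfig, waitForPodNotPending, and the package-local Framework type) now come from the new k8s.io/kubernetes/test/e2e/framework package, and the per-suite variable is renamed from framework to f so that the identifier framework can refer to the package. A minimal sketch of the pattern, using only calls that appear in this diff; the "Example" suite and the pod name are illustrative, not part of the commit:

package e2e

import (
    "k8s.io/kubernetes/test/e2e/framework"

    . "github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("Example", func() {
    // Before: framework := NewDefaultFramework("example")
    f := framework.NewDefaultFramework("example")

    It("uses the shared framework helpers", func() {
        // Before: podClient := framework.Client.Pods(framework.Namespace.Name)
        podClient := f.Client.Pods(f.Namespace.Name)

        pod, err := podClient.Get("example-pod") // hypothetical pod name
        if err != nil {
            // Before: Failf("Failed to get pod: %v", err)
            framework.Failf("Failed to get pod: %v", err)
        }

        // Before: expectNoError(framework.WaitForPodRunning(pod.Name))
        framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

        // Before: Logf("Pod %s is running", pod.Name)
        framework.Logf("Pod %s is running", pod.Name)
    })
})

Every hunk below is an instance of this substitution; the only added line is the new import.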
@@ -36,6 +36,7 @@ import (
     "k8s.io/kubernetes/pkg/util/intstr"
     "k8s.io/kubernetes/pkg/util/wait"
     "k8s.io/kubernetes/pkg/watch"
+    "k8s.io/kubernetes/test/e2e/framework"

     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -54,7 +55,7 @@ var (
 func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRestarts int, timeout time.Duration) {
     By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns))
     _, err := c.Pods(ns).Create(podDescr)
-    expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
+    framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))

     // At the end of the test, clean up by removing the pod.
     defer func() {
@@ -65,16 +66,16 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe
     // Wait until the pod is not pending. (Here we need to check for something other than
     // 'Pending' other than checking for 'Running', since when failures occur, we go to
     // 'Terminated' which can cause indefinite blocking.)
-    expectNoError(waitForPodNotPending(c, ns, podDescr.Name),
+    framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, podDescr.Name),
         fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns))
-    Logf("Started pod %s in namespace %s", podDescr.Name, ns)
+    framework.Logf("Started pod %s in namespace %s", podDescr.Name, ns)

     // Check the pod's current state and verify that restartCount is present.
     By("checking the pod's current state and verifying that restartCount is present")
     pod, err := c.Pods(ns).Get(podDescr.Name)
-    expectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns))
+    framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns))
     initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
-    Logf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount)
+    framework.Logf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount)

     // Wait for the restart state to be as desired.
     deadline := time.Now().Add(timeout)
@@ -82,13 +83,13 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe
     observedRestarts := 0
     for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
         pod, err = c.Pods(ns).Get(podDescr.Name)
-        expectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name))
+        framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name))
         restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
         if restartCount != lastRestartCount {
-            Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
+            framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
                 ns, podDescr.Name, restartCount, time.Since(start))
             if restartCount < lastRestartCount {
-                Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
+                framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
                     ns, podDescr.Name, lastRestartCount, restartCount)
             }
         }
@@ -104,7 +105,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe
     // If we expected n restarts (n > 0), fail if we observed < n restarts.
     if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
         observedRestarts < expectNumRestarts) {
-        Failf("pod %s/%s - expected number of restarts: %t, found restarts: %t",
+        framework.Failf("pod %s/%s - expected number of restarts: %t, found restarts: %t",
             ns, podDescr.Name, expectNumRestarts, observedRestarts)
     }
 }
@@ -115,12 +116,12 @@ func testHostIP(c *client.Client, ns string, pod *api.Pod) {
     By("creating pod")
     defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
     if _, err := podClient.Create(pod); err != nil {
-        Failf("Failed to create pod: %v", err)
+        framework.Failf("Failed to create pod: %v", err)
     }
     By("ensuring that pod is running and has a hostIP")
     // Wait for the pods to enter the running state. Waiting loops until the pods
     // are running so non-running pods cause a timeout for this test.
-    err := waitForPodRunningInNamespace(c, pod.Name, ns)
+    err := framework.WaitForPodRunningInNamespace(c, pod.Name, ns)
     Expect(err).NotTo(HaveOccurred())
     // Try to make sure we get a hostIP for each pod.
     hostIPTimeout := 2 * time.Minute
@@ -129,56 +130,56 @@ func testHostIP(c *client.Client, ns string, pod *api.Pod) {
         p, err := podClient.Get(pod.Name)
         Expect(err).NotTo(HaveOccurred())
         if p.Status.HostIP != "" {
-            Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
+            framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
             break
         }
         if time.Since(t) >= hostIPTimeout {
-            Failf("Gave up waiting for hostIP of pod %s after %v seconds",
+            framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
                 p.Name, time.Since(t).Seconds())
         }
-        Logf("Retrying to get the hostIP of pod %s", p.Name)
+        framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
         time.Sleep(5 * time.Second)
     }
 }

-func runPodFromStruct(framework *Framework, pod *api.Pod) {
+func runPodFromStruct(f *framework.Framework, pod *api.Pod) {
     By("submitting the pod to kubernetes")

-    podClient := framework.Client.Pods(framework.Namespace.Name)
+    podClient := f.Client.Pods(f.Namespace.Name)
     pod, err := podClient.Create(pod)
     if err != nil {
-        Failf("Failed to create pod: %v", err)
+        framework.Failf("Failed to create pod: %v", err)
     }

-    expectNoError(framework.WaitForPodRunning(pod.Name))
+    framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

     By("verifying the pod is in kubernetes")
     pod, err = podClient.Get(pod.Name)
     if err != nil {
-        Failf("failed to get pod: %v", err)
+        framework.Failf("failed to get pod: %v", err)
     }
 }

-func startPodAndGetBackOffs(framework *Framework, pod *api.Pod, podName string, containerName string, sleepAmount time.Duration) (time.Duration, time.Duration) {
-    runPodFromStruct(framework, pod)
+func startPodAndGetBackOffs(f *framework.Framework, pod *api.Pod, podName string, containerName string, sleepAmount time.Duration) (time.Duration, time.Duration) {
+    runPodFromStruct(f, pod)
     time.Sleep(sleepAmount)

     By("getting restart delay-0")
-    _, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName)
+    _, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
     if err != nil {
-        Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+        framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
     }

     By("getting restart delay-1")
-    delay1, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName)
+    delay1, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
     if err != nil {
-        Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+        framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
     }

     By("getting restart delay-2")
-    delay2, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName)
+    delay2, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
     if err != nil {
-        Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+        framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
     }
     return delay1, delay2
 }
@@ -188,29 +189,29 @@ func getRestartDelay(c *client.Client, pod *api.Pod, ns string, name string, con
     for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
         time.Sleep(time.Second)
         pod, err := c.Pods(ns).Get(name)
-        expectNoError(err, fmt.Sprintf("getting pod %s", name))
+        framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", name))
         status, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
         if !ok {
-            Logf("getRestartDelay: status missing")
+            framework.Logf("getRestartDelay: status missing")
            continue
         }

         if status.State.Waiting == nil && status.State.Running != nil && status.LastTerminationState.Terminated != nil && status.State.Running.StartedAt.Time.After(beginTime) {
             startedAt := status.State.Running.StartedAt.Time
             finishedAt := status.LastTerminationState.Terminated.FinishedAt.Time
-            Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, finishedAt, startedAt, startedAt.Sub(finishedAt))
+            framework.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, finishedAt, startedAt, startedAt.Sub(finishedAt))
             return startedAt.Sub(finishedAt), nil
         }
     }
     return 0, fmt.Errorf("timeout getting pod restart delay")
 }

-var _ = KubeDescribe("Pods", func() {
-    framework := NewDefaultFramework("pods")
+var _ = framework.KubeDescribe("Pods", func() {
+    f := framework.NewDefaultFramework("pods")

     It("should get a host IP [Conformance]", func() {
         name := "pod-hostip-" + string(util.NewUUID())
-        testHostIP(framework.Client, framework.Namespace.Name, &api.Pod{
+        testHostIP(f.Client, f.Namespace.Name, &api.Pod{
             ObjectMeta: api.ObjectMeta{
                 Name: name,
             },
@@ -226,7 +227,7 @@ var _ = KubeDescribe("Pods", func() {
     })

     It("should be schedule with cpu and memory limits [Conformance]", func() {
-        podClient := framework.Client.Pods(framework.Namespace.Name)
+        podClient := f.Client.Pods(f.Namespace.Name)

         By("creating the pod")
         name := "pod-update-" + string(util.NewUUID())
@@ -257,13 +258,13 @@ var _ = KubeDescribe("Pods", func() {
         defer podClient.Delete(pod.Name, nil)
         _, err := podClient.Create(pod)
         if err != nil {
-            Failf("Error creating a pod: %v", err)
+            framework.Failf("Error creating a pod: %v", err)
         }
-        expectNoError(framework.WaitForPodRunning(pod.Name))
+        framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
     })

     It("should be submitted and removed [Conformance]", func() {
-        podClient := framework.Client.Pods(framework.Namespace.Name)
+        podClient := f.Client.Pods(f.Namespace.Name)

         By("creating the pod")
         name := "pod-update-" + string(util.NewUUID())
@@ -301,7 +302,7 @@ var _ = KubeDescribe("Pods", func() {
         options := api.ListOptions{LabelSelector: selector}
         pods, err := podClient.List(options)
         if err != nil {
-            Failf("Failed to query for pods: %v", err)
+            framework.Failf("Failed to query for pods: %v", err)
         }
         Expect(len(pods.Items)).To(Equal(0))
         options = api.ListOptions{
@@ -310,7 +311,7 @@ var _ = KubeDescribe("Pods", func() {
         }
         w, err := podClient.Watch(options)
         if err != nil {
-            Failf("Failed to set up watch: %v", err)
+            framework.Failf("Failed to set up watch: %v", err)
         }

         By("submitting the pod to kubernetes")
@@ -320,7 +321,7 @@ var _ = KubeDescribe("Pods", func() {
         defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
         _, err = podClient.Create(pod)
         if err != nil {
-            Failf("Failed to create pod: %v", err)
+            framework.Failf("Failed to create pod: %v", err)
         }

         By("verifying the pod is in kubernetes")
@@ -328,7 +329,7 @@ var _ = KubeDescribe("Pods", func() {
         options = api.ListOptions{LabelSelector: selector}
         pods, err = podClient.List(options)
         if err != nil {
-            Failf("Failed to query for pods: %v", err)
+            framework.Failf("Failed to query for pods: %v", err)
         }
         Expect(len(pods.Items)).To(Equal(1))

@@ -336,27 +337,27 @@ var _ = KubeDescribe("Pods", func() {
         select {
         case event, _ := <-w.ResultChan():
             if event.Type != watch.Added {
-                Failf("Failed to observe pod creation: %v", event)
+                framework.Failf("Failed to observe pod creation: %v", event)
             }
-        case <-time.After(podStartTimeout):
+        case <-time.After(framework.PodStartTimeout):
             Fail("Timeout while waiting for pod creation")
         }

         // We need to wait for the pod to be scheduled, otherwise the deletion
         // will be carried out immediately rather than gracefully.
-        expectNoError(framework.WaitForPodRunning(pod.Name))
+        framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

         By("deleting the pod gracefully")
         if err := podClient.Delete(pod.Name, api.NewDeleteOptions(30)); err != nil {
-            Failf("Failed to delete pod: %v", err)
+            framework.Failf("Failed to delete pod: %v", err)
         }

         By("verifying the kubelet observed the termination notice")
         pod, err = podClient.Get(pod.Name)
         Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
-            podList, err := GetKubeletPods(framework.Client, pod.Spec.NodeName)
+            podList, err := framework.GetKubeletPods(f.Client, pod.Spec.NodeName)
             if err != nil {
-                Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
+                framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
                 return false, nil
             }
             for _, kubeletPod := range podList.Items {
@@ -364,12 +365,12 @@ var _ = KubeDescribe("Pods", func() {
                     continue
                 }
                 if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
-                    Logf("deletion has not yet been observed")
+                    framework.Logf("deletion has not yet been observed")
                     return false, nil
                 }
                 return true, nil
             }
-            Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
+            framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
             return true, nil
         })).NotTo(HaveOccurred(), "kubelet never observed the termination notice")

@@ -406,7 +407,7 @@ var _ = KubeDescribe("Pods", func() {
     })

     It("should be updated [Conformance]", func() {
-        podClient := framework.Client.Pods(framework.Namespace.Name)
+        podClient := f.Client.Pods(f.Namespace.Name)

         By("creating the pod")
         name := "pod-update-" + string(util.NewUUID())
@@ -446,10 +447,10 @@ var _ = KubeDescribe("Pods", func() {
         }()
         pod, err := podClient.Create(pod)
         if err != nil {
-            Failf("Failed to create pod: %v", err)
+            framework.Failf("Failed to create pod: %v", err)
         }

-        expectNoError(framework.WaitForPodRunning(pod.Name))
+        framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

         By("verifying the pod is in kubernetes")
         selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
@@ -458,7 +459,7 @@ var _ = KubeDescribe("Pods", func() {
         Expect(len(pods.Items)).To(Equal(1))

         // Standard get, update retry loop
-        expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
+        framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
             By("updating the pod")
             value = strconv.Itoa(time.Now().Nanosecond())
             if pod == nil { // on retries we need to re-get
@@ -470,29 +471,29 @@ var _ = KubeDescribe("Pods", func() {
             pod.Labels["time"] = value
             pod, err = podClient.Update(pod)
             if err == nil {
-                Logf("Successfully updated pod")
+                framework.Logf("Successfully updated pod")
                 return true, nil
             }
             if errors.IsConflict(err) {
-                Logf("Conflicting update to pod, re-get and re-update: %v", err)
+                framework.Logf("Conflicting update to pod, re-get and re-update: %v", err)
                 pod = nil // re-get it when we retry
                 return false, nil
             }
             return false, fmt.Errorf("failed to update pod: %v", err)
         }))

-        expectNoError(framework.WaitForPodRunning(pod.Name))
+        framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

         By("verifying the updated pod is in kubernetes")
         selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
         options = api.ListOptions{LabelSelector: selector}
         pods, err = podClient.List(options)
         Expect(len(pods.Items)).To(Equal(1))
-        Logf("Pod update OK")
+        framework.Logf("Pod update OK")
     })

     It("should allow activeDeadlineSeconds to be updated [Conformance]", func() {
-        podClient := framework.Client.Pods(framework.Namespace.Name)
+        podClient := f.Client.Pods(f.Namespace.Name)

         By("creating the pod")
         name := "pod-update-activedeadlineseconds-" + string(util.NewUUID())
@@ -532,10 +533,10 @@ var _ = KubeDescribe("Pods", func() {
         }()
         pod, err := podClient.Create(pod)
         if err != nil {
-            Failf("Failed to create pod: %v", err)
+            framework.Failf("Failed to create pod: %v", err)
         }

-        expectNoError(framework.WaitForPodRunning(pod.Name))
+        framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

         By("verifying the pod is in kubernetes")
         selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
@@ -544,7 +545,7 @@ var _ = KubeDescribe("Pods", func() {
         Expect(len(pods.Items)).To(Equal(1))

         // Standard get, update retry loop
-        expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
+        framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
             By("updating the pod")
             value = strconv.Itoa(time.Now().Nanosecond())
             if pod == nil { // on retries we need to re-get
@@ -557,18 +558,18 @@ var _ = KubeDescribe("Pods", func() {
             pod.Spec.ActiveDeadlineSeconds = &newDeadline
             pod, err = podClient.Update(pod)
             if err == nil {
-                Logf("Successfully updated pod")
+                framework.Logf("Successfully updated pod")
                 return true, nil
             }
             if errors.IsConflict(err) {
-                Logf("Conflicting update to pod, re-get and re-update: %v", err)
+                framework.Logf("Conflicting update to pod, re-get and re-update: %v", err)
                 pod = nil // re-get it when we retry
                 return false, nil
             }
             return false, fmt.Errorf("failed to update pod: %v", err)
         }))

-        expectNoError(framework.WaitForPodTerminated(pod.Name, "DeadlineExceeded"))
+        framework.ExpectNoError(f.WaitForPodTerminated(pod.Name, "DeadlineExceeded"))
     })

     It("should contain environment variables for services [Conformance]", func() {
@@ -590,12 +591,12 @@ var _ = KubeDescribe("Pods", func() {
                 },
             },
         }
-        defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, api.NewDeleteOptions(0))
-        _, err := framework.Client.Pods(framework.Namespace.Name).Create(serverPod)
+        defer f.Client.Pods(f.Namespace.Name).Delete(serverPod.Name, api.NewDeleteOptions(0))
+        _, err := f.Client.Pods(f.Namespace.Name).Create(serverPod)
         if err != nil {
-            Failf("Failed to create serverPod: %v", err)
+            framework.Failf("Failed to create serverPod: %v", err)
         }
-        expectNoError(framework.WaitForPodRunning(serverPod.Name))
+        framework.ExpectNoError(f.WaitForPodRunning(serverPod.Name))

         // This service exposes port 8080 of the test pod as a service on port 8765
         // TODO(filbranden): We would like to use a unique service name such as:
@@ -622,10 +623,10 @@ var _ = KubeDescribe("Pods", func() {
                 },
             },
         }
-        defer framework.Client.Services(framework.Namespace.Name).Delete(svc.Name)
-        _, err = framework.Client.Services(framework.Namespace.Name).Create(svc)
+        defer f.Client.Services(f.Namespace.Name).Delete(svc.Name)
+        _, err = f.Client.Services(f.Namespace.Name).Create(svc)
         if err != nil {
-            Failf("Failed to create service: %v", err)
+            framework.Failf("Failed to create service: %v", err)
         }

         // Make a client pod that verifies that it has the service environment variables.
@@ -647,7 +648,7 @@ var _ = KubeDescribe("Pods", func() {
             },
         }

-        framework.TestContainerOutput("service env", pod, 0, []string{
+        f.TestContainerOutput("service env", pod, 0, []string{
             "FOOSERVICE_SERVICE_HOST=",
             "FOOSERVICE_SERVICE_PORT=",
             "FOOSERVICE_PORT=",
@@ -659,7 +660,7 @@ var _ = KubeDescribe("Pods", func() {
     })

     It("should be restarted with a docker exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
-        runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{
+        runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
             ObjectMeta: api.ObjectMeta{
                 Name: "liveness-exec",
                 Labels: map[string]string{"test": "liveness"},
@@ -686,7 +687,7 @@ var _ = KubeDescribe("Pods", func() {
     })

     It("should *not* be restarted with a docker exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
-        runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{
+        runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
             ObjectMeta: api.ObjectMeta{
                 Name: "liveness-exec",
                 Labels: map[string]string{"test": "liveness"},
@@ -713,7 +714,7 @@ var _ = KubeDescribe("Pods", func() {
     })

     It("should be restarted with a /healthz http liveness probe [Conformance]", func() {
-        runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{
+        runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
             ObjectMeta: api.ObjectMeta{
                 Name: "liveness-http",
                 Labels: map[string]string{"test": "liveness"},
@@ -742,7 +743,7 @@ var _ = KubeDescribe("Pods", func() {

     // Slow by design (5 min)
     It("should have monotonically increasing restart count [Conformance] [Slow]", func() {
-        runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{
+        runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
             ObjectMeta: api.ObjectMeta{
                 Name: "liveness-http",
                 Labels: map[string]string{"test": "liveness"},
@@ -770,7 +771,7 @@ var _ = KubeDescribe("Pods", func() {
     })

     It("should *not* be restarted with a /healthz http liveness probe [Conformance]", func() {
-        runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{
+        runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
             ObjectMeta: api.ObjectMeta{
                 Name: "liveness-http",
                 Labels: map[string]string{"test": "liveness"},
@@ -785,7 +786,7 @@ var _ = KubeDescribe("Pods", func() {
                         Args: []string{
                             "-service=liveness-http",
                             "-peers=1",
-                            "-namespace=" + framework.Namespace.Name},
+                            "-namespace=" + f.Namespace.Name},
                         Ports: []api.ContainerPort{{ContainerPort: 8080}},
                         LivenessProbe: &api.Probe{
                             Handler: api.Handler{
@@ -805,11 +806,11 @@ var _ = KubeDescribe("Pods", func() {
     })

     It("should support remote command execution over websockets", func() {
-        config, err := loadConfig()
+        config, err := framework.LoadConfig()
         if err != nil {
-            Failf("Unable to get base config: %v", err)
+            framework.Failf("Unable to get base config: %v", err)
         }
-        podClient := framework.Client.Pods(framework.Namespace.Name)
+        podClient := f.Client.Pods(f.Namespace.Name)

         By("creating the pod")
         name := "pod-exec-websocket-" + string(util.NewUUID())
@@ -835,13 +836,13 @@ var _ = KubeDescribe("Pods", func() {
         }()
         pod, err = podClient.Create(pod)
         if err != nil {
-            Failf("Failed to create pod: %v", err)
+            framework.Failf("Failed to create pod: %v", err)
         }

-        expectNoError(framework.WaitForPodRunning(pod.Name))
+        framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

-        req := framework.Client.Get().
-            Namespace(framework.Namespace.Name).
+        req := f.Client.Get().
+            Namespace(f.Namespace.Name).
             Resource("pods").
             Name(pod.Name).
             Suffix("exec").
@@ -852,9 +853,9 @@ var _ = KubeDescribe("Pods", func() {
             Param("command", "/etc/resolv.conf")

         url := req.URL()
-        ws, err := OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
+        ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
         if err != nil {
-            Failf("Failed to open websocket to %s: %v", url.String(), err)
+            framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
         }
         defer ws.Close()

@@ -865,30 +866,30 @@ var _ = KubeDescribe("Pods", func() {
                 if err == io.EOF {
                     break
                 }
-                Failf("Failed to read completely from websocket %s: %v", url.String(), err)
+                framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
             }
             if len(msg) == 0 {
                 continue
             }
             if msg[0] != 1 {
-                Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
+                framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
             }
             buf.Write(msg[1:])
         }
         if buf.Len() == 0 {
-            Failf("Unexpected output from server")
+            framework.Failf("Unexpected output from server")
         }
         if !strings.Contains(buf.String(), "nameserver") {
-            Failf("Expected to find 'nameserver' in %q", buf.String())
+            framework.Failf("Expected to find 'nameserver' in %q", buf.String())
         }
     })

     It("should support retrieving logs from the container over websockets", func() {
-        config, err := loadConfig()
+        config, err := framework.LoadConfig()
         if err != nil {
-            Failf("Unable to get base config: %v", err)
+            framework.Failf("Unable to get base config: %v", err)
         }
-        podClient := framework.Client.Pods(framework.Namespace.Name)
+        podClient := f.Client.Pods(f.Namespace.Name)

         By("creating the pod")
         name := "pod-logs-websocket-" + string(util.NewUUID())
@@ -914,13 +915,13 @@ var _ = KubeDescribe("Pods", func() {
         }()
         pod, err = podClient.Create(pod)
         if err != nil {
-            Failf("Failed to create pod: %v", err)
+            framework.Failf("Failed to create pod: %v", err)
         }

-        expectNoError(framework.WaitForPodRunning(pod.Name))
+        framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

-        req := framework.Client.Get().
-            Namespace(framework.Namespace.Name).
+        req := f.Client.Get().
+            Namespace(f.Namespace.Name).
             Resource("pods").
             Name(pod.Name).
             Suffix("log").
@@ -928,9 +929,9 @@ var _ = KubeDescribe("Pods", func() {

         url := req.URL()

-        ws, err := OpenWebSocketForURL(url, config, []string{"binary.k8s.io"})
+        ws, err := framework.OpenWebSocketForURL(url, config, []string{"binary.k8s.io"})
         if err != nil {
-            Failf("Failed to open websocket to %s: %v", url.String(), err)
+            framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
         }
         defer ws.Close()
         buf := &bytes.Buffer{}
@@ -940,7 +941,7 @@ var _ = KubeDescribe("Pods", func() {
                 if err == io.EOF {
                     break
                 }
-                Failf("Failed to read completely from websocket %s: %v", url.String(), err)
+                framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
             }
             if len(msg) == 0 {
                 continue
@@ -948,14 +949,14 @@ var _ = KubeDescribe("Pods", func() {
             buf.Write(msg)
         }
         if buf.String() != "container is alive\n" {
-            Failf("Unexpected websocket logs:\n%s", buf.String())
+            framework.Failf("Unexpected websocket logs:\n%s", buf.String())
         }
     })

     It("should have their auto-restart back-off timer reset on image update [Slow]", func() {
         podName := "pod-back-off-image"
         containerName := "back-off"
-        podClient := framework.Client.Pods(framework.Namespace.Name)
+        podClient := f.Client.Pods(f.Namespace.Name)
         pod := &api.Pod{
             ObjectMeta: api.ObjectMeta{
                 Name: podName,
@@ -977,35 +978,35 @@ var _ = KubeDescribe("Pods", func() {
             podClient.Delete(pod.Name, api.NewDeleteOptions(0))
         }()

-        delay1, delay2 := startPodAndGetBackOffs(framework, pod, podName, containerName, buildBackOffDuration)
+        delay1, delay2 := startPodAndGetBackOffs(f, pod, podName, containerName, buildBackOffDuration)

         By("updating the image")
         pod, err := podClient.Get(pod.Name)
         if err != nil {
-            Failf("failed to get pod: %v", err)
+            framework.Failf("failed to get pod: %v", err)
         }
         pod.Spec.Containers[0].Image = "gcr.io/google_containers/nginx:1.7.9"
         pod, err = podClient.Update(pod)
         if err != nil {
-            Failf("error updating pod=%s/%s %v", podName, containerName, err)
+            framework.Failf("error updating pod=%s/%s %v", podName, containerName, err)
         }
         time.Sleep(syncLoopFrequency)
-        expectNoError(framework.WaitForPodRunning(pod.Name))
+        framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

         By("get restart delay after image update")
-        delayAfterUpdate, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName)
+        delayAfterUpdate, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
         if err != nil {
-            Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+            framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
         }

         if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 {
-            Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2)
+            framework.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2)
         }
     })

     // Slow issue #19027 (20 mins)
     It("should cap back-off at MaxContainerBackOff [Slow]", func() {
-        podClient := framework.Client.Pods(framework.Namespace.Name)
+        podClient := f.Client.Pods(f.Namespace.Name)
         podName := "back-off-cap"
         containerName := "back-off-cap"
         pod := &api.Pod{
@@ -1029,7 +1030,7 @@ var _ = KubeDescribe("Pods", func() {
             podClient.Delete(pod.Name, api.NewDeleteOptions(0))
         }()

-        runPodFromStruct(framework, pod)
+        runPodFromStruct(f, pod)
         time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x

         // wait for a delay == capped delay of MaxContainerBackOff
@@ -1039,9 +1040,9 @@ var _ = KubeDescribe("Pods", func() {
             err error
         )
         for i := 0; i < 3; i++ {
-            delay1, err = getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName)
+            delay1, err = getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
             if err != nil {
-                Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+                framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
             }

             if delay1 < kubelet.MaxContainerBackOff {
@@ -1050,17 +1051,17 @@ var _ = KubeDescribe("Pods", func() {
         }

         if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) {
-            Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
+            framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
         }

         By("getting restart delay after a capped delay")
-        delay2, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName)
+        delay2, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
         if err != nil {
-            Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+            framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
        }

         if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift
-            Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
+            framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
         }
     })

@@ -1071,12 +1072,12 @@ var _ = KubeDescribe("Pods", func() {
     // all providers), we can enable these tests.
     /*
         It("should support remote command execution", func() {
-            clientConfig, err := loadConfig()
+            clientConfig, err := framework.LoadConfig()
             if err != nil {
-                Failf("Failed to create client config: %v", err)
+                framework.Failf("Failed to create client config: %v", err)
             }

-            podClient := framework.Client.Pods(framework.Namespace.Name)
+            podClient := f.Client.Pods(f.Namespace.Name)

             By("creating the pod")
             name := "pod-exec-" + string(util.NewUUID())
@@ -1102,7 +1103,7 @@ var _ = KubeDescribe("Pods", func() {
             By("submitting the pod to kubernetes")
             _, err = podClient.Create(pod)
             if err != nil {
-                Failf("Failed to create pod: %v", err)
+                framework.Failf("Failed to create pod: %v", err)
             }
             defer func() {
                 // We call defer here in case there is a problem with
@@ -1112,45 +1113,45 @@ var _ = KubeDescribe("Pods", func() {
             }()

             By("waiting for the pod to start running")
-            expectNoError(framework.WaitForPodRunning(pod.Name))
+            framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

             By("verifying the pod is in kubernetes")
             selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
             options := api.ListOptions{LabelSelector: selector}
             pods, err := podClient.List(options)
             if err != nil {
-                Failf("Failed to query for pods: %v", err)
+                framework.Failf("Failed to query for pods: %v", err)
             }
             Expect(len(pods.Items)).To(Equal(1))

             pod = &pods.Items[0]
             By(fmt.Sprintf("executing command on host %s pod %s in container %s",
                 pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name))
-            req := framework.Client.Get().
+            req := f.Client.Get().
                 Prefix("proxy").
                 Resource("nodes").
                 Name(pod.Status.Host).
-                Suffix("exec", framework.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+                Suffix("exec", f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)

             out := &bytes.Buffer{}
             e := remotecommand.New(req, clientConfig, []string{"whoami"}, nil, out, nil, false)
             err = e.Execute()
             if err != nil {
-                Failf("Failed to execute command on host %s pod %s in container %s: %v",
+                framework.Failf("Failed to execute command on host %s pod %s in container %s: %v",
                     pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name, err)
             }
             if e, a := "root\n", out.String(); e != a {
-                Failf("exec: whoami: expected '%s', got '%s'", e, a)
+                framework.Failf("exec: whoami: expected '%s', got '%s'", e, a)
             }
         })

         It("should support port forwarding", func() {
-            clientConfig, err := loadConfig()
+            clientConfig, err := framework.LoadConfig()
             if err != nil {
-                Failf("Failed to create client config: %v", err)
+                framework.Failf("Failed to create client config: %v", err)
             }

-            podClient := framework.Client.Pods(framework.Namespace.Name)
+            podClient := f.Client.Pods(f.Namespace.Name)

             By("creating the pod")
             name := "pod-portforward-" + string(util.NewUUID())
@@ -1177,7 +1178,7 @@ var _ = KubeDescribe("Pods", func() {
             By("submitting the pod to kubernetes")
             _, err = podClient.Create(pod)
             if err != nil {
-                Failf("Failed to create pod: %v", err)
+                framework.Failf("Failed to create pod: %v", err)
             }
             defer func() {
                 // We call defer here in case there is a problem with
@@ -1187,14 +1188,14 @@ var _ = KubeDescribe("Pods", func() {
             }()

             By("waiting for the pod to start running")
-            expectNoError(framework.WaitForPodRunning(pod.Name))
+            framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

             By("verifying the pod is in kubernetes")
             selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
             options := api.ListOptions{LabelSelector: selector}
             pods, err := podClient.List(options)
             if err != nil {
-                Failf("Failed to query for pods: %v", err)
+                framework.Failf("Failed to query for pods: %v", err)
             }
             Expect(len(pods.Items)).To(Equal(1))

@@ -1202,16 +1203,16 @@ var _ = KubeDescribe("Pods", func() {
             By(fmt.Sprintf("initiating port forwarding to host %s pod %s in container %s",
                 pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name))

-            req := framework.Client.Get().
+            req := f.Client.Get().
                 Prefix("proxy").
                 Resource("nodes").
                 Name(pod.Status.Host).
-                Suffix("portForward", framework.Namespace.Name, pod.Name)
+                Suffix("portForward", f.Namespace.Name, pod.Name)

             stopChan := make(chan struct{})
             pf, err := portforward.New(req, clientConfig, []string{"5678:80"}, stopChan)
             if err != nil {
-                Failf("Error creating port forwarder: %s", err)
+                framework.Failf("Error creating port forwarder: %s", err)
             }

             errorChan := make(chan error)
@@ -1224,11 +1225,11 @@ var _ = KubeDescribe("Pods", func() {

             resp, err := http.Get("http://localhost:5678/")
             if err != nil {
-                Failf("Error with http get to localhost:5678: %s", err)
+                framework.Failf("Error with http get to localhost:5678: %s", err)
             }
             body, err := ioutil.ReadAll(resp.Body)
             if err != nil {
-                Failf("Error reading response body: %s", err)
+                framework.Failf("Error reading response body: %s", err)
             }

             titleRegex := regexp.MustCompile("<title>(.+)</title>")
@@ -1237,7 +1238,7 @@ var _ = KubeDescribe("Pods", func() {
                 Fail("Unable to locate page title in response HTML")
             }
             if e, a := "Welcome to nginx on Debian!", matches[1]; e != a {
-                Failf("<title>: expected '%s', got '%s'", e, a)
+                framework.Failf("<title>: expected '%s', got '%s'", e, a)
             }
         })
     */