Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-30 15:05:27 +00:00)
Merge pull request #67363 from hanxiaoshuai/fix0813
Automatic merge from submit-queue (batch tested with PRs 66920, 67316, 67363, 67528, 66963). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

**What this PR does / why we need it**: some minor fixes in test/e2e/kubectl/kubectl.go and removal of some unused functions.

**Release note**:

```release-note
NONE
```
Commit: 7b2f16be7b
Changes to test/e2e/kubectl/kubectl.go:

```diff
@@ -26,7 +26,6 @@ import (
 	"io"
 	"io/ioutil"
 	"log"
-	"mime/multipart"
 	"net"
 	"net/http"
 	"net/http/httptest"
```
```diff
@@ -141,11 +140,11 @@ func substituteImageName(content string) string {
 	contentWithImageName := new(bytes.Buffer)
 	tmpl, err := template.New("imagemanifest").Parse(content)
 	if err != nil {
-		framework.Failf("Failed Parse the template:", err)
+		framework.Failf("Failed Parse the template: %v", err)
 	}
 	err = tmpl.Execute(contentWithImageName, testImages)
 	if err != nil {
-		framework.Failf("Failed executing template:", err)
+		framework.Failf("Failed executing template: %v", err)
 	}
 	return contentWithImageName.String()
 }
```
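The two `Failf` fixes in this hunk are worth a note: `framework.Failf` takes a Printf-style format string, so a call that names no verb for its `err` argument renders the error as a `%!(EXTRA ...)` blob instead of a clean message. A self-contained sketch of the difference, using plain `fmt.Sprintf` rather than the e2e framework:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("unexpected EOF")

	// Without a verb, the extra argument is appended as an EXTRA blob:
	fmt.Println(fmt.Sprintf("Failed Parse the template:", err))
	// Failed Parse the template:%!(EXTRA *errors.errorString=unexpected EOF)

	// With %v, the error is formatted cleanly:
	fmt.Println(fmt.Sprintf("Failed Parse the template: %v", err))
	// Failed Parse the template: unexpected EOF
}
```

This is the class of mistake `go vet`'s printf check exists to catch, though it only fires when it can tell the function is a printf wrapper.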
```diff
@@ -183,8 +182,6 @@ var _ = SIGDescribe("Kubectl alpha client", func() {
 		ns = f.Namespace.Name
 	})
 
-	// Customized Wait / ForEach wrapper for this test. These demonstrate the
-
 	framework.KubeDescribe("Kubectl run CronJob", func() {
 		var nsFlag string
 		var cjName string
```
```diff
@@ -213,7 +210,7 @@ var _ = SIGDescribe("Kubectl alpha client", func() {
 				framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
 			}
 			containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
-			if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
+			if checkContainersImage(containers, busyboxImage) {
 				framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
 			}
 			if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
```
```diff
@@ -255,7 +252,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 		if err != nil || len(pods) < atLeast {
 			// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
 			framework.DumpAllNamespaceInfo(f.ClientSet, ns)
-			framework.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
+			framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
 		}
 	}
 
```
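An aside on this hunk (my reading, not stated in the PR): for integer arguments `%v` and `%d` print identically, so that part of the change is about stating intent; the visible fix is the space dropped from `error :`. A quick check:

```go
package main

import "fmt"

func main() {
	fmt.Printf("Verified %v of %v pods\n", 2, 3) // Verified 2 of 3 pods
	fmt.Printf("Verified %d of %d pods\n", 2, 3) // Verified 2 of 3 pods (identical)
}
```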
```diff
@@ -1269,7 +1266,7 @@ metadata:
 				framework.Failf("Failed getting rc %s: %v", rcName, err)
 			}
 			containers := rc.Spec.Template.Spec.Containers
-			if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
+			if checkContainersImage(containers, nginxImage) {
 				framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
 			}
 
```
```diff
@@ -1330,7 +1327,7 @@ metadata:
 				framework.Failf("Failed getting rc %s: %v", rcName, err)
 			}
 			containers := rc.Spec.Template.Spec.Containers
-			if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
+			if checkContainersImage(containers, nginxImage) {
 				framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
 			}
 			framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout)
```
```diff
@@ -1380,7 +1377,7 @@ metadata:
 				framework.Failf("Failed getting deployment %s: %v", dName, err)
 			}
 			containers := d.Spec.Template.Spec.Containers
-			if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
+			if checkContainersImage(containers, nginxImage) {
 				framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage)
 			}
 
```
```diff
@@ -1425,7 +1422,7 @@ metadata:
 				framework.Failf("Failed getting job %s: %v", jobName, err)
 			}
 			containers := job.Spec.Template.Spec.Containers
-			if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
+			if checkContainersImage(containers, nginxImage) {
 				framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers)
 			}
 			if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
```
```diff
@@ -1462,7 +1459,7 @@ metadata:
 				framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
 			}
 			containers := cj.Spec.JobTemplate.Spec.Template.Spec.Containers
-			if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
+			if checkContainersImage(containers, busyboxImage) {
 				framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
 			}
 			if cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
```
```diff
@@ -1498,7 +1495,7 @@ metadata:
 				framework.Failf("Failed getting pod %s: %v", podName, err)
 			}
 			containers := pod.Spec.Containers
-			if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
+			if checkContainersImage(containers, nginxImage) {
 				framework.Failf("Failed creating pod %s with expected image %s", podName, nginxImage)
 			}
 			if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
```
```diff
@@ -1552,7 +1549,7 @@ metadata:
 				framework.Failf("Failed getting deployment %s: %v", podName, err)
 			}
 			containers := pod.Spec.Containers
-			if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
+			if checkContainersImage(containers, busyboxImage) {
 				framework.Failf("Failed creating pod with expected image %s", busyboxImage)
 			}
 		})
```
```diff
@@ -1868,6 +1865,10 @@ func checkKubectlOutputWithRetry(required [][]string, args ...string) {
 	return
 }
 
+func checkContainersImage(containers []v1.Container, expectImage string) bool {
+	return containers == nil || len(containers) != 1 || containers[0].Image != expectImage
+}
+
 func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
 	body, err := curl(apiEndpoint)
 	if err != nil {
```
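The new `checkContainersImage` helper centralizes the triple condition that the eight call sites above previously spelled out inline. Note the polarity: it returns `true` when the container list is *not* exactly one container with the expected image, i.e. `true` means "fail the test". A minimal standalone sketch, with a stand-in `Container` type instead of `k8s.io/api/core/v1.Container`:

```go
package main

import "fmt"

// Container stands in for k8s.io/api/core/v1.Container; only the
// Image field matters to the check.
type Container struct {
	Image string
}

// Same logic as the helper added in the diff above.
func checkContainersImage(containers []Container, expectImage string) bool {
	return containers == nil || len(containers) != 1 || containers[0].Image != expectImage
}

func main() {
	fmt.Println(checkContainersImage(nil, "nginx"))                             // true: no containers
	fmt.Println(checkContainersImage([]Container{{Image: "busybox"}}, "nginx")) // true: wrong image
	fmt.Println(checkContainersImage(make([]Container, 2), "nginx"))            // true: more than one container
	fmt.Println(checkContainersImage([]Container{{Image: "nginx"}}, "nginx"))   // false: exactly one, right image
}
```

Strictly, the `containers == nil` clause is redundant (`len` of a nil slice is 0, which already fails `!= 1`), but the helper preserves the original condition verbatim.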
```diff
@@ -1993,21 +1994,6 @@ type updateDemoData struct {
 
 const applyTestLabel = "kubectl.kubernetes.io/apply-test"
 
-func readBytesFromFile(filename string) []byte {
-	file, err := os.Open(filename)
-	if err != nil {
-		framework.Failf(err.Error())
-	}
-	defer file.Close()
-
-	data, err := ioutil.ReadAll(file)
-	if err != nil {
-		framework.Failf(err.Error())
-	}
-
-	return data
-}
-
 func readReplicationControllerFromString(contents string) *v1.ReplicationController {
 	rc := v1.ReplicationController{}
 	if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
```
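`readBytesFromFile` goes away here because nothing in the file calls it any longer. It was also more code than needed: the open/read-all/close dance is what `ioutil.ReadFile` already does in one call. A minimal sketch with a placeholder path, using the `io/ioutil` API of this code's era:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// "testdata/example.txt" is a placeholder path for this sketch.
	data, err := ioutil.ReadFile("testdata/example.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}
```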
```diff
@@ -2124,47 +2110,6 @@ func newBlockingReader(s string) (io.Reader, io.Closer, error) {
 	return r, w, nil
 }
 
-// newStreamingUpload creates a new http.Request that will stream POST
-// a file to a URI.
-func newStreamingUpload(filePath string) (*io.PipeReader, *multipart.Writer, error) {
-	file, err := os.Open(filePath)
-	if err != nil {
-		return nil, nil, err
-	}
-	defer file.Close()
-
-	r, w := io.Pipe()
-
-	postBodyWriter := multipart.NewWriter(w)
-
-	go streamingUpload(file, filepath.Base(filePath), postBodyWriter, w)
-	return r, postBodyWriter, err
-}
-
-// streamingUpload streams a file via a pipe through a multipart.Writer.
-// Generally one should use newStreamingUpload instead of calling this directly.
-func streamingUpload(file *os.File, fileName string, postBodyWriter *multipart.Writer, w *io.PipeWriter) {
-	defer GinkgoRecover()
-	defer file.Close()
-	defer w.Close()
-
-	// Set up the form file
-	fileWriter, err := postBodyWriter.CreateFormFile("file", fileName)
-	if err != nil {
-		framework.Failf("Unable to write file at %s to buffer. Error: %s", fileName, err)
-	}
-
-	// Copy kubectl binary into the file writer
-	if _, err := io.Copy(fileWriter, file); err != nil {
-		framework.Failf("Unable to copy file at %s into the file writer. Error: %s", fileName, err)
-	}
-
-	// Nothing more should be written to this instance of the postBodyWriter
-	if err := postBodyWriter.Close(); err != nil {
-		framework.Failf("Unable to close the writer for file upload. Error: %s", err)
-	}
-}
-
 func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
 	logs = &bytes.Buffer{}
 	p := goproxy.NewProxyHttpServer()
```
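The deleted `newStreamingUpload`/`streamingUpload` pair implemented the classic `io.Pipe` + `multipart.Writer` streaming pattern: a goroutine writes the multipart body into the pipe while the HTTP client reads from it, so the uploaded file is never fully buffered in memory. Below is a self-contained sketch of that pattern, not the removed code itself; the local test server and the string payload are stand-ins to make it runnable:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"mime/multipart"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	// Stand-in receiving server: reports how many body bytes arrived.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		n, _ := io.Copy(ioutil.Discard, r.Body)
		fmt.Fprintf(w, "received %d bytes", n)
	}))
	defer srv.Close()

	pr, pw := io.Pipe()
	mw := multipart.NewWriter(pw)

	// Producer goroutine: streams the multipart body into the pipe.
	go func() {
		part, err := mw.CreateFormFile("file", "payload.txt")
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		if _, err := io.Copy(part, strings.NewReader("hello, streaming upload")); err != nil {
			pw.CloseWithError(err)
			return
		}
		// mw.Close writes the trailing multipart boundary; closing the
		// pipe with its result lets the reader see EOF or the error.
		pw.CloseWithError(mw.Close())
	}()

	// Consumer: http.Post reads from the pipe as the goroutine writes.
	resp, err := http.Post(srv.URL, mw.FormDataContentType(), pr)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	reply, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(reply))
}
```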