mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-30 06:54:01 +00:00
move funcs of framework/volume to e2e/storage
Signed-off-by: clarklee92 <clarklee1992@hotmail.com>
This commit is contained in:
parent 36db62cd73
commit 7171f9a7f4
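In short, the commit moves the iSCSI/Ceph server helpers, the volume-server cleanup helpers, and two command generators out of the shared test/e2e/framework/volume package into test/e2e/storage, unexporting them at their new locations, and unexports StartVolumeServer behind the still-exported CreateStorageServer. A schematic view of the call-site migration, with the surrounding driver code elided:

// Before: exported helpers from the shared framework package.
//	c, serverPod, serverIP, iqn := volume.NewISCSIServer(cs, ns.Name)
//	...
//	volume.CleanUpVolumeServer(v.f, v.serverPod)
//
// After: unexported, package-local copies in test/e2e/storage/drivers.
//	c, serverPod, serverIP, iqn := newISCSIServer(cs, ns.Name)
//	...
//	cleanUpVolumeServer(v.f, v.serverPod)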
@@ -14,7 +14,6 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
-        "//test/e2e/framework/pv:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -52,7 +52,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"

@@ -84,9 +83,6 @@ const (
 	// PodCleanupTimeout is a waiting period for pod to be cleaned up and unmount its volumes so we
 	// don't tear down containers with NFS/Ceph/Gluster server too early.
 	PodCleanupTimeout = 20 * time.Second
-
-	// Template for iSCSI IQN.
-	iSCSIIQNTemplate = "iqn.2003-01.io.k8s:e2e.%s"
 )

 // SizeRange encapsulates a range of sizes specified as minimum and maximum quantity strings
@@ -208,74 +204,11 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestCo
 	return config, pod, ip
 }

-// NewISCSIServer is an iSCSI-specific wrapper for CreateStorageServer.
-func NewISCSIServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip, iqn string) {
-	// Generate cluster-wide unique IQN
-	iqn = fmt.Sprintf(iSCSIIQNTemplate, namespace)
-	config = TestConfig{
-		Namespace:   namespace,
-		Prefix:      "iscsi",
-		ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer),
-		ServerArgs:  []string{iqn},
-		ServerVolumes: map[string]string{
-			// iSCSI container needs to insert modules from the host
-			"/lib/modules": "/lib/modules",
-			// iSCSI container needs to configure kernel
-			"/sys/kernel": "/sys/kernel",
-			// iSCSI source "block devices" must be available on the host
-			"/srv/iscsi": "/srv/iscsi",
-		},
-		ServerReadyMessage: "iscsi target started",
-		ServerHostNetwork:  true,
-	}
-	pod, ip = CreateStorageServer(cs, config)
-	// Make sure the client runs on the same node as server so we don't need to open any firewalls.
-	config.ClientNodeName = pod.Spec.NodeName
-	return config, pod, ip, iqn
-}
-
-// NewRBDServer is a CephRBD-specific wrapper for CreateStorageServer.
-func NewRBDServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
-	config = TestConfig{
-		Namespace:   namespace,
-		Prefix:      "rbd",
-		ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
-		ServerPorts: []int{6789},
-		ServerVolumes: map[string]string{
-			"/lib/modules": "/lib/modules",
-		},
-		ServerReadyMessage: "Ceph is ready",
-	}
-	pod, ip = CreateStorageServer(cs, config)
-	// create secrets for the server
-	secret = &v1.Secret{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Secret",
-			APIVersion: "v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: config.Prefix + "-secret",
-		},
-		Data: map[string][]byte{
-			// from test/images/volumes-tester/rbd/keyring
-			"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
-		},
-		Type: "kubernetes.io/rbd",
-	}
-
-	secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret)
-	if err != nil {
-		framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
-	}
-
-	return config, pod, secret, ip
-}
-
-// CreateStorageServer is a wrapper for StartVolumeServer(). A storage server config is passed in, and a pod pointer
+// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
 // and ip address string are returned.
 // Note: Expect() is called so no error is returned.
 func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) {
-	pod = StartVolumeServer(cs, config)
+	pod = startVolumeServer(cs, config)
 	gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil")
 	ip = pod.Status.PodIP
 	gomega.Expect(len(ip)).NotTo(gomega.BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
@@ -283,10 +216,10 @@ func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod
 	return pod, ip
 }

-// StartVolumeServer starts a container specified by config.serverImage and exports all
+// startVolumeServer starts a container specified by config.serverImage and exports all
 // config.serverPorts from it. The returned pod should be used to get the server
 // IP address and create appropriate VolumeSource.
-func StartVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
+func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
 	podClient := client.CoreV1().Pods(config.Namespace)

 	portCount := len(config.ServerPorts)
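The rename above relies on Go's export rule: an identifier beginning with a lower-case letter is visible only inside its own package, so after this hunk outside callers must go through CreateStorageServer. A minimal sketch of the pattern, with a stand-in package name and illustrative bodies:

// Package volume is a stand-in for test/e2e/framework/volume.
package volume

// startVolumeServer is unexported: only code in this package can call it,
// so external test code cannot bypass the exported wrapper below.
func startVolumeServer() string { return "server-pod" }

// CreateStorageServer stays exported as the public entry point and
// delegates to the unexported implementation.
func CreateStorageServer() (pod string, ip string) {
	return startVolumeServer(), "10.0.0.7" // illustrative values only
}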
@@ -400,31 +333,6 @@ func StartVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
 	return pod
 }

-// CleanUpVolumeServer is a wrapper of cleanup function for volume server without secret created by specific CreateStorageServer function.
-func CleanUpVolumeServer(f *framework.Framework, serverPod *v1.Pod) {
-	CleanUpVolumeServerWithSecret(f, serverPod, nil)
-}
-
-// CleanUpVolumeServerWithSecret is a wrapper of cleanup function for volume server with secret created by specific CreateStorageServer function.
-func CleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, secret *v1.Secret) {
-	cs := f.ClientSet
-	ns := f.Namespace
-
-	if secret != nil {
-		framework.Logf("Deleting server secret %q...", secret.Name)
-		err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{})
-		if err != nil {
-			framework.Logf("Delete secret failed: %v", err)
-		}
-	}
-
-	framework.Logf("Deleting server pod %q...", serverPod.Name)
-	err := e2epod.DeletePodWithWait(cs, serverPod)
-	if err != nil {
-		framework.Logf("Server pod delete failed: %v", err)
-	}
-}
-
 // TestCleanup cleans both server and client pods.
 func TestCleanup(f *framework.Framework, config TestConfig) {
 	ginkgo.By(fmt.Sprint("cleaning the environment after ", config.Prefix))

@@ -623,19 +531,6 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
 	testVolumeContent(f, injectorPod, fsGroup, fsType, tests)
 }

-// CreateGCEVolume creates PersistentVolumeSource for GCEVolume.
-func CreateGCEVolume() (*v1.PersistentVolumeSource, string) {
-	diskName, err := e2epv.CreatePDWithRetry()
-	framework.ExpectNoError(err)
-	return &v1.PersistentVolumeSource{
-		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
-			PDName:   diskName,
-			FSType:   "ext3",
-			ReadOnly: false,
-		},
-	}, diskName
-}
-
 // GenerateScriptCmd generates the corresponding command lines to execute a command.
 // Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
 func GenerateScriptCmd(command string) []string {

@@ -697,25 +592,6 @@ func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
 	return commands
 }

-// GenerateWriteandExecuteScriptFileCmd generates the corresponding command lines to write a file with the given file path
-// and also execute this file.
-// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
-func GenerateWriteandExecuteScriptFileCmd(content, fileName, filePath string) []string {
-	// for windows cluster, modify the Pod spec.
-	if framework.NodeOSDistroIs("windows") {
-		scriptName := fmt.Sprintf("%s.ps1", fileName)
-		fullPath := filepath.Join(filePath, scriptName)
-
-		cmd := "echo \"" + content + "\" > " + fullPath + "; .\\" + fullPath
-		framework.Logf("generated pod command %s", cmd)
-		return []string{"powershell", "/c", cmd}
-	}
-	scriptName := fmt.Sprintf("%s.sh", fileName)
-	fullPath := filepath.Join(filePath, scriptName)
-	cmd := fmt.Sprintf("echo \"%s\" > %s; chmod u+x %s; %s;", content, fullPath, fullPath, fullPath)
-	return []string{"/bin/sh", "-ec", cmd}
-}
-
 // GenerateSecurityContext generates the corresponding container security context with the given inputs
 // If the Node OS is windows, currently we will ignore the inputs and return nil.
 // TODO: Will modify it after windows has its own security context

@@ -51,6 +51,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apiserver/pkg/authentication/serviceaccount"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/auth"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"

@@ -64,6 +65,11 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )

+const (
+	// Template for iSCSI IQN.
+	iSCSIIQNTemplate = "iqn.2003-01.io.k8s:e2e.%s"
+)
+
 // NFS
 type nfsDriver struct {
 	externalProvisionerPod *v1.Pod
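The iSCSIIQNTemplate constant now lives beside its only remaining caller. Because the IQN embeds the test namespace, each namespace yields a cluster-wide unique target name. A self-contained sketch of the formatting, where the namespace value is a made-up example:

package main

import "fmt"

// iSCSIIQNTemplate matches the constant added in the hunk above.
const iSCSIIQNTemplate = "iqn.2003-01.io.k8s:e2e.%s"

func main() {
	namespace := "e2e-tests-iscsi-x7kq9" // hypothetical test namespace
	iqn := fmt.Sprintf(iSCSIIQNTemplate, namespace)
	fmt.Println(iqn) // Output: iqn.2003-01.io.k8s:e2e.e2e-tests-iscsi-x7kq9
}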
@@ -207,7 +213,7 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
 }

 func (v *nfsVolume) DeleteVolume() {
-	volume.CleanUpVolumeServer(v.f, v.serverPod)
+	cleanUpVolumeServer(v.f, v.serverPod)
 }

 // Gluster

@@ -430,7 +436,7 @@ func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 	cs := f.ClientSet
 	ns := f.Namespace

-	c, serverPod, serverIP, iqn := volume.NewISCSIServer(cs, ns.Name)
+	c, serverPod, serverIP, iqn := newISCSIServer(cs, ns.Name)
 	config.ServerConfig = &c
 	config.ClientNodeName = c.ClientNodeName
 	return &iSCSIVolume{

@@ -441,8 +447,71 @@ func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 	}
 }

+// newISCSIServer is an iSCSI-specific wrapper for CreateStorageServer.
+func newISCSIServer(cs clientset.Interface, namespace string) (config volume.TestConfig, pod *v1.Pod, ip, iqn string) {
+	// Generate cluster-wide unique IQN
+	iqn = fmt.Sprintf(iSCSIIQNTemplate, namespace)
+	config = volume.TestConfig{
+		Namespace:   namespace,
+		Prefix:      "iscsi",
+		ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer),
+		ServerArgs:  []string{iqn},
+		ServerVolumes: map[string]string{
+			// iSCSI container needs to insert modules from the host
+			"/lib/modules": "/lib/modules",
+			// iSCSI container needs to configure kernel
+			"/sys/kernel": "/sys/kernel",
+			// iSCSI source "block devices" must be available on the host
+			"/srv/iscsi": "/srv/iscsi",
+		},
+		ServerReadyMessage: "iscsi target started",
+		ServerHostNetwork:  true,
+	}
+	pod, ip = volume.CreateStorageServer(cs, config)
+	// Make sure the client runs on the same node as server so we don't need to open any firewalls.
+	config.ClientNodeName = pod.Spec.NodeName
+	return config, pod, ip, iqn
+}
+
+// newRBDServer is a CephRBD-specific wrapper for CreateStorageServer.
+func newRBDServer(cs clientset.Interface, namespace string) (config volume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
+	config = volume.TestConfig{
+		Namespace:   namespace,
+		Prefix:      "rbd",
+		ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
+		ServerPorts: []int{6789},
+		ServerVolumes: map[string]string{
+			"/lib/modules": "/lib/modules",
+		},
+		ServerReadyMessage: "Ceph is ready",
+	}
+	pod, ip = volume.CreateStorageServer(cs, config)
+	// create secrets for the server
+	secret = &v1.Secret{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Secret",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: config.Prefix + "-secret",
+		},
+		Data: map[string][]byte{
+			// from test/images/volumes-tester/rbd/keyring
+			"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
+		},
+		Type: "kubernetes.io/rbd",
+	}
+
+	secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret)
+	if err != nil {
+		framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
+	}
+
+	return config, pod, secret, ip
+}

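The Ceph secret assembled in newRBDServer above is an ordinary client-go object. A self-contained sketch of the same construction, assuming the k8s.io/api and k8s.io/apimachinery modules are on the module path; the keyring value is the test fixture quoted in the diff:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	prefix := "rbd" // matches the TestConfig prefix used above
	secret := &v1.Secret{
		TypeMeta:   metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{Name: prefix + "-secret"},
		Data: map[string][]byte{
			// from test/images/volumes-tester/rbd/keyring
			"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
		},
		Type: "kubernetes.io/rbd",
	}
	fmt.Printf("%s (%s): %d bytes of key material\n",
		secret.Name, secret.Type, len(secret.Data["key"]))
}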
 func (v *iSCSIVolume) DeleteVolume() {
-	volume.CleanUpVolumeServer(v.f, v.serverPod)
+	cleanUpVolumeServer(v.f, v.serverPod)
 }

 // Ceph RBD

@@ -559,7 +628,7 @@ func (r *rbdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
 	cs := f.ClientSet
 	ns := f.Namespace

-	c, serverPod, secret, serverIP := volume.NewRBDServer(cs, ns.Name)
+	c, serverPod, secret, serverIP := newRBDServer(cs, ns.Name)
 	config.ServerConfig = &c
 	return &rbdVolume{
 		serverPod: serverPod,

@@ -570,7 +639,7 @@ func (r *rbdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
 }

 func (v *rbdVolume) DeleteVolume() {
-	volume.CleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret)
+	cleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret)
 }

 // Ceph

@@ -669,7 +738,7 @@ func (c *cephFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType te
 	cs := f.ClientSet
 	ns := f.Namespace

-	cfg, serverPod, secret, serverIP := volume.NewRBDServer(cs, ns.Name)
+	cfg, serverPod, secret, serverIP := newRBDServer(cs, ns.Name)
 	config.ServerConfig = &cfg
 	return &cephVolume{
 		serverPod: serverPod,

@@ -680,7 +749,7 @@ func (c *cephFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType te
 }

 func (v *cephVolume) DeleteVolume() {
-	volume.CleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret)
+	cleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret)
 }

 // Hostpath
@@ -1838,3 +1907,28 @@ func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, vo
 		},
 	}, l.nodeAffinityForNode(lv.ltr.Node)
 }
+
+// cleanUpVolumeServer is a wrapper of cleanup function for volume server without secret created by specific CreateStorageServer function.
+func cleanUpVolumeServer(f *framework.Framework, serverPod *v1.Pod) {
+	cleanUpVolumeServerWithSecret(f, serverPod, nil)
+}
+
+// cleanUpVolumeServerWithSecret is a wrapper of cleanup function for volume server with secret created by specific CreateStorageServer function.
+func cleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, secret *v1.Secret) {
+	cs := f.ClientSet
+	ns := f.Namespace
+
+	if secret != nil {
+		framework.Logf("Deleting server secret %q...", secret.Name)
+		err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{})
+		if err != nil {
+			framework.Logf("Delete secret failed: %v", err)
+		}
+	}
+
+	framework.Logf("Deleting server pod %q...", serverPod.Name)
+	err := e2epod.DeletePodWithWait(cs, serverPod)
+	if err != nil {
+		framework.Logf("Server pod delete failed: %v", err)
+	}
+}

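The two cleanup helpers follow a common Go shape: one full function with an optional *v1.Secret parameter, plus a thin wrapper that passes nil for callers that created no secret. A minimal standalone sketch of that shape; the names and types here are illustrative stand-ins, not the e2e framework types:

package main

import "fmt"

type secret struct{ name string }

// cleanupWithSecret mirrors cleanUpVolumeServerWithSecret: the secret
// argument is optional, and nil means there is no secret to delete.
func cleanupWithSecret(serverPod string, s *secret) {
	if s != nil {
		fmt.Printf("Deleting server secret %q...\n", s.name)
	}
	fmt.Printf("Deleting server pod %q...\n", serverPod)
}

// cleanup mirrors cleanUpVolumeServer: a thin wrapper passing nil.
func cleanup(serverPod string) { cleanupWithSecret(serverPod, nil) }

func main() {
	cleanup("nfs-server")
	cleanupWithSecret("rbd-server", &secret{name: "rbd-secret"})
}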
@@ -150,7 +150,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 		framework.SkipUnlessSSHKeyPresent()

 		ginkgo.By("Initializing first PD with PVPVC binding")
-		pvSource1, diskName1 = volume.CreateGCEVolume()
+		pvSource1, diskName1 = createGCEVolume()
 		framework.ExpectNoError(err)
 		pvConfig1 = e2epv.PersistentVolumeConfig{
 			NamePrefix: "gce-",

@@ -163,7 +163,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 		framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv1, pvc1))

 		ginkgo.By("Initializing second PD with PVPVC binding")
-		pvSource2, diskName2 = volume.CreateGCEVolume()
+		pvSource2, diskName2 = createGCEVolume()
 		framework.ExpectNoError(err)
 		pvConfig2 = e2epv.PersistentVolumeConfig{
 			NamePrefix: "gce-",

@@ -274,6 +274,19 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 	})
 })

+// createGCEVolume creates PersistentVolumeSource for GCEVolume.
+func createGCEVolume() (*v1.PersistentVolumeSource, string) {
+	diskName, err := e2epv.CreatePDWithRetry()
+	framework.ExpectNoError(err)
+	return &v1.PersistentVolumeSource{
+		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
+			PDName:   diskName,
+			FSType:   "ext3",
+			ReadOnly: false,
+		},
+	}, diskName
+}

 // initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed
 // by the test.
 func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {

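createGCEVolume pairs a freshly provisioned PD name with a v1.PersistentVolumeSource that the PV config can embed. A self-contained sketch of the source construction, assuming the k8s.io/api module is available and using a hypothetical disk name in place of the one the real helper gets from e2epv.CreatePDWithRetry:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	diskName := "e2e-disk-0123456789" // hypothetical; normally returned by CreatePDWithRetry
	pvSource := &v1.PersistentVolumeSource{
		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
			PDName:   diskName,
			FSType:   "ext3",
			ReadOnly: false,
		},
	}
	fmt.Printf("PD %s formatted as %s\n",
		pvSource.GCEPersistentDisk.PDName, pvSource.GCEPersistentDisk.FSType)
}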
@@ -23,6 +23,7 @@ package testsuites

 import (
 	"fmt"
+	"path/filepath"

 	"github.com/onsi/ginkgo"

@@ -216,7 +217,7 @@ func testScriptInPod(
 	} else {
 		content = fmt.Sprintf("ls %s", volPath)
 	}
-	command := volume.GenerateWriteandExecuteScriptFileCmd(content, fileName, volPath)
+	command := generateWriteandExecuteScriptFileCmd(content, fileName, volPath)
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: fmt.Sprintf("exec-volume-test-%s", suffix),

@@ -254,3 +255,22 @@ func testScriptInPod(
 	err := e2epod.DeletePodWithWait(f.ClientSet, pod)
 	framework.ExpectNoError(err, "while deleting pod")
 }
+
+// generateWriteandExecuteScriptFileCmd generates the corresponding command lines to write a file with the given file path
+// and also execute this file.
+// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
+func generateWriteandExecuteScriptFileCmd(content, fileName, filePath string) []string {
+	// for windows cluster, modify the Pod spec.
+	if framework.NodeOSDistroIs("windows") {
+		scriptName := fmt.Sprintf("%s.ps1", fileName)
+		fullPath := filepath.Join(filePath, scriptName)
+
+		cmd := "echo \"" + content + "\" > " + fullPath + "; .\\" + fullPath
+		framework.Logf("generated pod command %s", cmd)
+		return []string{"powershell", "/c", cmd}
+	}
+	scriptName := fmt.Sprintf("%s.sh", fileName)
+	fullPath := filepath.Join(filePath, scriptName)
+	cmd := fmt.Sprintf("echo \"%s\" > %s; chmod u+x %s; %s;", content, fullPath, fullPath, fullPath)
+	return []string{"/bin/sh", "-ec", cmd}
+}
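The command generator above writes a script into the mounted volume and then executes it, picking powershell or /bin/sh by node OS. A runnable sketch of the same logic, with the framework.NodeOSDistroIs check replaced by an explicit windows parameter (a hypothetical stand-in) so it runs outside the e2e framework:

package main

import (
	"fmt"
	"path/filepath"
)

// generateCmd reproduces generateWriteandExecuteScriptFileCmd from the diff,
// taking the OS as a parameter instead of querying the test framework.
func generateCmd(content, fileName, filePath string, windows bool) []string {
	if windows {
		fullPath := filepath.Join(filePath, fmt.Sprintf("%s.ps1", fileName))
		cmd := "echo \"" + content + "\" > " + fullPath + "; .\\" + fullPath
		return []string{"powershell", "/c", cmd}
	}
	fullPath := filepath.Join(filePath, fmt.Sprintf("%s.sh", fileName))
	cmd := fmt.Sprintf("echo \"%s\" > %s; chmod u+x %s; %s;", content, fullPath, fullPath, fullPath)
	return []string{"/bin/sh", "-ec", cmd}
}

func main() {
	// Linux: writes test.sh into the volume, marks it executable, runs it.
	fmt.Println(generateCmd("echo hello", "test", "/mnt/volume1", false))
	// Windows: writes test.ps1 and invokes it via powershell.
	fmt.Println(generateCmd("echo hello", "test", "/mnt/volume1", true))
}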