test/e2e_node

Chao Xu 2016-11-18 12:55:46 -08:00
parent f3b5d514ab
commit 29400ac195
23 changed files with 386 additions and 384 deletions
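Every hunk in this commit applies the same mechanical migration: the node e2e tests drop the internal "k8s.io/kubernetes/pkg/api" package in favor of the versioned "k8s.io/kubernetes/pkg/api/v1" types (plus the generated release_1_5 clientset where a client is needed). A minimal sketch of the post-migration shape, assuming the vendored 1.5-era packages; the buildPod helper is illustrative, not part of the commit:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// buildPod (hypothetical) constructs a pod the way the migrated tests do:
// every identifier that used to come from the internal package (api.Pod,
// api.ObjectMeta, api.RestartPolicyNever, ...) now comes from pkg/api/v1.
func buildPod(name string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: name}, // was api.ObjectMeta
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever, // was api.RestartPolicyNever
			Containers: []v1.Container{{
				Name:    "test",
				Image:   "gcr.io/google_containers/busybox:1.24",
				Command: []string{"sh", "-c", "echo hello"},
			}},
		},
	}
}

func main() {
	fmt.Println(buildPod("example").Name)
}

The swap is type-compatible at most call sites, so the diffs below are almost entirely this one-for-one renaming.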

View File

@@ -28,7 +28,7 @@ import (
 "k8s.io/client-go/pkg/api/errors"
 "k8s.io/client-go/pkg/api/unversioned"
-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/security/apparmor"
 "k8s.io/kubernetes/pkg/watch"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -138,7 +138,7 @@ func loadTestProfiles() error {
 return nil
 }
-func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) api.PodStatus {
+func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.PodStatus {
 pod := createPodWithAppArmor(f, profile)
 if shouldRun {
 // The pod needs to start before it stops, so wait for the longer start timeout.
@@ -146,7 +146,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) api
 f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
 } else {
 // Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
-w, err := f.PodClient().Watch(api.SingleObject(api.ObjectMeta{Name: pod.Name}))
+w, err := f.PodClient().Watch(v1.SingleObject(v1.ObjectMeta{Name: pod.Name}))
 framework.ExpectNoError(err)
 _, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
 switch e.Type {
@@ -154,7 +154,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) api
 return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, pod.Name)
 }
 switch t := e.Object.(type) {
-case *api.Pod:
+case *v1.Pod:
 if t.Status.Reason == "AppArmor" {
 return true, nil
 }
@@ -168,29 +168,29 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) api
 return p.Status
 }
-func createPodWithAppArmor(f *framework.Framework, profile string) *api.Pod {
-pod := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {
+pod := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: fmt.Sprintf("test-apparmor-%s", strings.Replace(profile, "/", "-", -1)),
 Annotations: map[string]string{
 apparmor.ContainerAnnotationKeyPrefix + "test": profile,
 },
 },
-Spec: api.PodSpec{
-Containers: []api.Container{{
+Spec: v1.PodSpec{
+Containers: []v1.Container{{
 Name: "test",
 Image: "gcr.io/google_containers/busybox:1.24",
 Command: []string{"touch", "foo"},
 }},
-RestartPolicy: api.RestartPolicyNever,
+RestartPolicy: v1.RestartPolicyNever,
 },
 }
 return f.PodClient().Create(pod)
 }
-func expectSoftRejection(status api.PodStatus) {
+func expectSoftRejection(status v1.PodStatus) {
 args := []interface{}{"PodStatus: %+v", status}
-Expect(status.Phase).To(Equal(api.PodPending), args...)
+Expect(status.Phase).To(Equal(v1.PodPending), args...)
 Expect(status.Reason).To(Equal("AppArmor"), args...)
 Expect(status.Message).To(ContainSubstring("AppArmor"), args...)
 Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(Equal("Blocked"), args...)

View File

@@ -17,8 +17,8 @@ limitations under the License.
 package e2e_node
 import (
-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/resource"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/kubelet/cm"
 "k8s.io/kubernetes/pkg/kubelet/qos"
 "k8s.io/kubernetes/pkg/util/uuid"
@@ -30,27 +30,27 @@ import (
 // getResourceList returns a ResourceList with the
 // specified cpu and memory resource values
-func getResourceList(cpu, memory string) api.ResourceList {
-res := api.ResourceList{}
+func getResourceList(cpu, memory string) v1.ResourceList {
+res := v1.ResourceList{}
 if cpu != "" {
-res[api.ResourceCPU] = resource.MustParse(cpu)
+res[v1.ResourceCPU] = resource.MustParse(cpu)
 }
 if memory != "" {
-res[api.ResourceMemory] = resource.MustParse(memory)
+res[v1.ResourceMemory] = resource.MustParse(memory)
 }
 return res
 }
 // getResourceRequirements returns a ResourceRequirements object
-func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements {
-res := api.ResourceRequirements{}
+func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
+res := v1.ResourceRequirements{}
 res.Requests = requests
 res.Limits = limits
 return res
 }
 // makePodToVerifyCgroups returns a pod that verifies the existence of the specified cgroups.
-func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
+func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *v1.Pod {
 // convert the names to their literal cgroupfs forms...
 cgroupFsNames := []string{}
 for _, cgroupName := range cgroupNames {
@@ -68,18 +68,18 @@ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
 command += localCommand
 }
-pod := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+pod := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 },
-Spec: api.PodSpec{
-RestartPolicy: api.RestartPolicyNever,
-Containers: []api.Container{
+Spec: v1.PodSpec{
+RestartPolicy: v1.RestartPolicyNever,
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/busybox:1.24",
 Name: "container" + string(uuid.NewUUID()),
 Command: []string{"sh", "-c", command},
-VolumeMounts: []api.VolumeMount{
+VolumeMounts: []v1.VolumeMount{
 {
 Name: "sysfscgroup",
 MountPath: "/tmp",
@@ -87,11 +87,11 @@ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
 },
 },
 },
-Volumes: []api.Volume{
+Volumes: []v1.Volume{
 {
 Name: "sysfscgroup",
-VolumeSource: api.VolumeSource{
-HostPath: &api.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
+VolumeSource: v1.VolumeSource{
+HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
 },
 },
 },
@@ -101,23 +101,23 @@ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
 }
 // makePodToVerifyCgroupRemoved verfies the specified cgroup does not exist.
-func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *api.Pod {
+func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *v1.Pod {
 cgroupFsName := string(cgroupName)
 if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
 cgroupFsName = cm.ConvertCgroupNameToSystemd(cm.CgroupName(cgroupName), true)
 }
-pod := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+pod := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 },
-Spec: api.PodSpec{
-RestartPolicy: api.RestartPolicyOnFailure,
-Containers: []api.Container{
+Spec: v1.PodSpec{
+RestartPolicy: v1.RestartPolicyOnFailure,
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/busybox:1.24",
 Name: "container" + string(uuid.NewUUID()),
 Command: []string{"sh", "-c", "for i in `seq 1 10`; do if [ ! -d /tmp/memory/" + cgroupFsName + " ] && [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 0; else sleep 10; fi; done; exit 1"},
-VolumeMounts: []api.VolumeMount{
+VolumeMounts: []v1.VolumeMount{
 {
 Name: "sysfscgroup",
 MountPath: "/tmp",
@@ -125,11 +125,11 @@ func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *api.Pod {
 },
 },
 },
-Volumes: []api.Volume{
+Volumes: []v1.Volume{
 {
 Name: "sysfscgroup",
-VolumeSource: api.VolumeSource{
-HostPath: &api.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
+VolumeSource: v1.VolumeSource{
+HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
 },
 },
 },
@@ -162,17 +162,17 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 return
 }
 var (
-guaranteedPod *api.Pod
+guaranteedPod *v1.Pod
 podUID string
 )
 By("Creating a Guaranteed pod in Namespace", func() {
-guaranteedPod = f.PodClient().Create(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+guaranteedPod = f.PodClient().Create(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 Namespace: f.Namespace.Name,
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Image: framework.GetPauseImageName(f.ClientSet),
 Name: "container" + string(uuid.NewUUID()),
@@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 })
 By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-Expect(f.PodClient().Delete(guaranteedPod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+Expect(f.PodClient().Delete(guaranteedPod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
 pod := makePodToVerifyCgroupRemoved(cm.CgroupName("pod" + podUID))
 f.PodClient().Create(pod)
 err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
@@ -207,16 +207,16 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 }
 var (
 podUID string
-bestEffortPod *api.Pod
+bestEffortPod *v1.Pod
 )
 By("Creating a BestEffort pod in Namespace", func() {
-bestEffortPod = f.PodClient().Create(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+bestEffortPod = f.PodClient().Create(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 Namespace: f.Namespace.Name,
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Image: framework.GetPauseImageName(f.ClientSet),
 Name: "container" + string(uuid.NewUUID()),
@@ -236,7 +236,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 })
 By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-Expect(f.PodClient().Delete(bestEffortPod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+Expect(f.PodClient().Delete(bestEffortPod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
 pod := makePodToVerifyCgroupRemoved(cm.CgroupName("BestEffort/pod" + podUID))
 f.PodClient().Create(pod)
 err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
@@ -251,16 +251,16 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 }
 var (
 podUID string
-burstablePod *api.Pod
+burstablePod *v1.Pod
 )
 By("Creating a Burstable pod in Namespace", func() {
-burstablePod = f.PodClient().Create(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+burstablePod = f.PodClient().Create(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 Namespace: f.Namespace.Name,
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Image: framework.GetPauseImageName(f.ClientSet),
 Name: "container" + string(uuid.NewUUID()),
@@ -280,7 +280,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 })
 By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-Expect(f.PodClient().Delete(burstablePod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+Expect(f.PodClient().Delete(burstablePod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
 pod := makePodToVerifyCgroupRemoved(cm.CgroupName("Burstable/pod" + podUID))
 f.PodClient().Create(pod)
 err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)

View File

@@ -19,8 +19,8 @@ package e2e_node
 import (
 "fmt"
-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/errors"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
 )
@@ -28,29 +28,29 @@ import (
 // One pod one container
 // TODO: This should be migrated to the e2e framework.
 type ConformanceContainer struct {
-Container api.Container
-RestartPolicy api.RestartPolicy
-Volumes []api.Volume
+Container v1.Container
+RestartPolicy v1.RestartPolicy
+Volumes []v1.Volume
 ImagePullSecrets []string
 PodClient *framework.PodClient
 podName string
-PodSecurityContext *api.PodSecurityContext
+PodSecurityContext *v1.PodSecurityContext
 }
 func (cc *ConformanceContainer) Create() {
 cc.podName = cc.Container.Name + string(uuid.NewUUID())
-imagePullSecrets := []api.LocalObjectReference{}
+imagePullSecrets := []v1.LocalObjectReference{}
 for _, s := range cc.ImagePullSecrets {
-imagePullSecrets = append(imagePullSecrets, api.LocalObjectReference{Name: s})
+imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: s})
 }
-pod := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+pod := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: cc.podName,
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
 RestartPolicy: cc.RestartPolicy,
-Containers: []api.Container{
+Containers: []v1.Container{
 cc.Container,
 },
 SecurityContext: cc.PodSecurityContext,
@@ -62,7 +62,7 @@ func (cc *ConformanceContainer) Create() {
 }
 func (cc *ConformanceContainer) Delete() error {
-return cc.PodClient.Delete(cc.podName, api.NewDeleteOptions(0))
+return cc.PodClient.Delete(cc.podName, v1.NewDeleteOptions(0))
 }
 func (cc *ConformanceContainer) IsReady() (bool, error) {
@@ -70,25 +70,25 @@ func (cc *ConformanceContainer) IsReady() (bool, error) {
 if err != nil {
 return false, err
 }
-return api.IsPodReady(pod), nil
+return v1.IsPodReady(pod), nil
 }
-func (cc *ConformanceContainer) GetPhase() (api.PodPhase, error) {
+func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
 pod, err := cc.PodClient.Get(cc.podName)
 if err != nil {
-return api.PodUnknown, err
+return v1.PodUnknown, err
 }
 return pod.Status.Phase, nil
 }
-func (cc *ConformanceContainer) GetStatus() (api.ContainerStatus, error) {
+func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
 pod, err := cc.PodClient.Get(cc.podName)
 if err != nil {
-return api.ContainerStatus{}, err
+return v1.ContainerStatus{}, err
 }
 statuses := pod.Status.ContainerStatuses
 if len(statuses) != 1 || statuses[0].Name != cc.Container.Name {
-return api.ContainerStatus{}, fmt.Errorf("unexpected container statuses %v", statuses)
+return v1.ContainerStatus{}, fmt.Errorf("unexpected container statuses %v", statuses)
 }
 return statuses[0], nil
 }
@@ -113,7 +113,7 @@ const (
 ContainerStateUnknown ContainerState = "Unknown"
 )
-func GetContainerState(state api.ContainerState) ContainerState {
+func GetContainerState(state v1.ContainerState) ContainerState {
 if state.Waiting != nil {
 return ContainerStateWaiting
 }

View File

@@ -26,8 +26,8 @@ import (
 "strings"
 "time"
-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/resource"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -95,12 +95,12 @@ var _ = framework.KubeDescribe("Kubelet Container Manager [Serial]", func() {
 var err error
 podClient := f.PodClient()
 podName := "besteffort" + string(uuid.NewUUID())
-podClient.Create(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+podClient.Create(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: podName,
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/serve_hostname:v1.4",
 Name: podName,
@@ -139,17 +139,17 @@ var _ = framework.KubeDescribe("Kubelet Container Manager [Serial]", func() {
 It("guaranteed container's oom-score-adj should be -998", func() {
 podClient := f.PodClient()
 podName := "guaranteed" + string(uuid.NewUUID())
-podClient.Create(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+podClient.Create(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: podName,
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/nginx-slim:0.7",
 Name: podName,
-Resources: api.ResourceRequirements{
-Limits: api.ResourceList{
+Resources: v1.ResourceRequirements{
+Limits: v1.ResourceList{
 "cpu": resource.MustParse("100m"),
 "memory": resource.MustParse("50Mi"),
 },
@@ -180,17 +180,17 @@ var _ = framework.KubeDescribe("Kubelet Container Manager [Serial]", func() {
 It("burstable container's oom-score-adj should be between [2, 1000)", func() {
 podClient := f.PodClient()
 podName := "burstable" + string(uuid.NewUUID())
-podClient.Create(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+podClient.Create(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: podName,
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/test-webserver:e2e",
 Name: podName,
-Resources: api.ResourceRequirements{
-Requests: api.ResourceList{
+Resources: v1.ResourceRequirements{
+Requests: v1.ResourceList{
 "cpu": resource.MustParse("100m"),
 "memory": resource.MustParse("50Mi"),
 },

View File

@@ -25,8 +25,8 @@ import (
 "sync"
 "time"
-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/unversioned"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/client/cache"
 "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
@@ -429,7 +429,7 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 // createBatchPodWithRateControl creates a batch of pods concurrently, uses one goroutine for each creation.
 // between creations there is an interval for throughput control
-func createBatchPodWithRateControl(f *framework.Framework, pods []*api.Pod, interval time.Duration) map[string]unversioned.Time {
+func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]unversioned.Time {
 createTimes := make(map[string]unversioned.Time)
 for _, pod := range pods {
 createTimes[pod.ObjectMeta.Name] = unversioned.Now()
@@ -479,12 +479,12 @@ func verifyPodStartupLatency(expect, actual framework.LatencyMetric) error {
 func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]unversioned.Time,
 podType string) *cache.Controller {
 ns := f.Namespace.Name
-checkPodRunning := func(p *api.Pod) {
+checkPodRunning := func(p *v1.Pod) {
 mutex.Lock()
 defer mutex.Unlock()
 defer GinkgoRecover()
-if p.Status.Phase == api.PodRunning {
+if p.Status.Phase == v1.PodRunning {
 if _, found := watchTimes[p.Name]; !found {
 watchTimes[p.Name] = unversioned.Now()
 }
@@ -493,26 +493,26 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
 _, controller := cache.NewInformer(
 &cache.ListWatch{
-ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType})
+ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
 obj, err := f.ClientSet.Core().Pods(ns).List(options)
 return runtime.Object(obj), err
 },
-WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType})
+WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
 return f.ClientSet.Core().Pods(ns).Watch(options)
 },
 },
-&api.Pod{},
+&v1.Pod{},
 0,
 cache.ResourceEventHandlerFuncs{
 AddFunc: func(obj interface{}) {
-p, ok := obj.(*api.Pod)
+p, ok := obj.(*v1.Pod)
 Expect(ok).To(Equal(true))
 go checkPodRunning(p)
 },
 UpdateFunc: func(oldObj, newObj interface{}) {
-p, ok := newObj.(*api.Pod)
+p, ok := newObj.(*v1.Pod)
 Expect(ok).To(Equal(true))
 go checkPodRunning(p)
 },
@@ -522,7 +522,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
 }
 // createBatchPodSequential creats pods back-to-back in sequence.
-func createBatchPodSequential(f *framework.Framework, pods []*api.Pod) (time.Duration, []framework.PodLatencyData) {
+func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod) (time.Duration, []framework.PodLatencyData) {
 batchStartTime := unversioned.Now()
 e2eLags := make([]framework.PodLatencyData, 0)
 for _, pod := range pods {
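One detail in the density-test hunks above is not a pure rename: v1.ListOptions carries label and field selectors as plain strings, so the selector built with labels.SelectorFromSet must now be serialized with .String() before each List or Watch call. A minimal sketch of that informer wiring under the same vendored 1.5-era packages; newPodInformer and onRunning are illustrative names, not part of the commit:

package e2e_node

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
	"k8s.io/kubernetes/test/e2e/framework"
)

// newPodInformer (hypothetical) mirrors newInformerWatchPod above.
func newPodInformer(f *framework.Framework, podType string, onRunning func(*v1.Pod)) *cache.Controller {
	ns := f.Namespace.Name
	// v1.ListOptions uses string selectors, hence the .String() call.
	selector := labels.SelectorFromSet(labels.Set{"type": podType}).String()
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				options.LabelSelector = selector
				return f.ClientSet.Core().Pods(ns).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = selector
				return f.ClientSet.Core().Pods(ns).Watch(options)
			},
		},
		&v1.Pod{}, // the object type the informer watches
		0,         // no periodic resync
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if p, ok := obj.(*v1.Pod); ok && p.Status.Phase == v1.PodRunning {
					onRunning(p)
				}
			},
		},
	)
	return controller
}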

View File

@@ -21,13 +21,13 @@ import (
 "strings"
 "time"
-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 )
 const (
@@ -72,13 +72,13 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
 idlePodName = "idle" + string(uuid.NewUUID())
 verifyPodName = "verify" + string(uuid.NewUUID())
 createIdlePod(idlePodName, podClient)
-podClient.Create(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+podClient.Create(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: busyPodName,
 },
-Spec: api.PodSpec{
-RestartPolicy: api.RestartPolicyNever,
-Containers: []api.Container{
+Spec: v1.PodSpec{
+RestartPolicy: v1.RestartPolicyNever,
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/busybox:1.24",
 Name: busyPodName,
@@ -96,9 +96,9 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
 if !isImageSupported() || !evictionOptionIsSet() { // Skip the after each
 return
 }
-podClient.DeleteSync(busyPodName, &api.DeleteOptions{}, podDisappearTimeout)
-podClient.DeleteSync(idlePodName, &api.DeleteOptions{}, podDisappearTimeout)
-podClient.DeleteSync(verifyPodName, &api.DeleteOptions{}, podDisappearTimeout)
+podClient.DeleteSync(busyPodName, &v1.DeleteOptions{}, podDisappearTimeout)
+podClient.DeleteSync(idlePodName, &v1.DeleteOptions{}, podDisappearTimeout)
+podClient.DeleteSync(verifyPodName, &v1.DeleteOptions{}, podDisappearTimeout)
 // Wait for 2 container gc loop to ensure that the containers are deleted. The containers
 // created in this test consume a lot of disk, we don't want them to trigger disk eviction
@@ -140,7 +140,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
 return err
 }
-if podData.Status.Phase != api.PodRunning {
+if podData.Status.Phase != v1.PodRunning {
 err = verifyPodEviction(podData)
 if err != nil {
 return err
@@ -174,7 +174,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
 if err != nil {
 return err
 }
-if podData.Status.Phase != api.PodRunning {
+if podData.Status.Phase != v1.PodRunning {
 return fmt.Errorf("waiting for the new pod to be running")
 }
@@ -186,13 +186,13 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
 })
 func createIdlePod(podName string, podClient *framework.PodClient) {
-podClient.Create(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+podClient.Create(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: podName,
 },
-Spec: api.PodSpec{
-RestartPolicy: api.RestartPolicyNever,
-Containers: []api.Container{
+Spec: v1.PodSpec{
+RestartPolicy: v1.RestartPolicyNever,
+Containers: []v1.Container{
 {
 Image: framework.GetPauseImageNameForHostArch(),
 Name: podName,
@@ -202,8 +202,8 @@ func createIdlePod(podName string, podClient *framework.PodClient) {
 })
 }
-func verifyPodEviction(podData *api.Pod) error {
-if podData.Status.Phase != api.PodFailed {
+func verifyPodEviction(podData *v1.Pod) error {
+if podData.Status.Phase != v1.PodFailed {
 return fmt.Errorf("expected phase to be failed. got %+v", podData.Status.Phase)
 }
 if podData.Status.Reason != "Evicted" {
@@ -215,8 +215,8 @@ func verifyPodEviction(podData *api.Pod) error {
 func nodeHasDiskPressure(cs clientset.Interface) bool {
 nodeList := framework.GetReadySchedulableNodesOrDie(cs)
 for _, condition := range nodeList.Items[0].Status.Conditions {
-if condition.Type == api.NodeDiskPressure {
-return condition.Status == api.ConditionTrue
+if condition.Type == v1.NodeDiskPressure {
+return condition.Status == v1.ConditionTrue
 }
 }
 return false

View File

@@ -31,8 +31,8 @@ import (
 "testing"
 "time"
-"k8s.io/kubernetes/pkg/api"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+"k8s.io/kubernetes/pkg/api/v1"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 commontest "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e_node/services"
@@ -212,7 +212,7 @@ func waitForNodeReady() {
 if err != nil {
 return fmt.Errorf("failed to get node: %v", err)
 }
-if !api.IsNodeReady(node) {
+if !v1.IsNodeReady(node) {
 return fmt.Errorf("node is not ready: %+v", node)
 }
 return nil
@@ -245,8 +245,8 @@ func updateTestContext() error {
 }
 // getNode gets node object from the apiserver.
-func getNode(c *clientset.Clientset) (*api.Node, error) {
-nodes, err := c.Nodes().List(api.ListOptions{})
+func getNode(c *clientset.Clientset) (*v1.Node, error) {
+nodes, err := c.Nodes().List(v1.ListOptions{})
 Expect(err).NotTo(HaveOccurred(), "should be able to list nodes.")
 if nodes == nil {
 return nil, fmt.Errorf("the node list is nil.")
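The suite helpers follow the same pattern: node objects, list options, and the readiness check all move to pkg/api/v1, and the client is the generated release_1_5 clientset imported above. A minimal sketch under those assumptions; firstNodeReady is an illustrative name, not part of the commit:

package e2e_node

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// firstNodeReady (hypothetical) lists nodes the way getNode does above and
// checks readiness via the v1 helper instead of the internal api package.
func firstNodeReady(c *clientset.Clientset) (bool, error) {
	nodes, err := c.Nodes().List(v1.ListOptions{}) // v1.ListOptions, not api.ListOptions
	if err != nil {
		return false, fmt.Errorf("failed to list nodes: %v", err)
	}
	if nodes == nil || len(nodes.Items) == 0 {
		return false, fmt.Errorf("the node list is empty")
	}
	return v1.IsNodeReady(&nodes.Items[0]), nil
}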

View File

@@ -21,7 +21,7 @@ import (
 "strings"
 "time"
-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 docker "k8s.io/kubernetes/pkg/kubelet/dockertools"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -230,7 +230,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
 AfterEach(func() {
 for _, pod := range test.testPods {
 By(fmt.Sprintf("Deleting Pod %v", pod.podName))
-f.PodClient().DeleteSync(pod.podName, &api.DeleteOptions{}, defaultRuntimeRequestTimeoutDuration)
+f.PodClient().DeleteSync(pod.podName, &v1.DeleteOptions{}, defaultRuntimeRequestTimeoutDuration)
 }
 By("Making sure all containers get cleaned up")
@@ -279,12 +279,12 @@ func dockerContainerGCTest(f *framework.Framework, test testRun) {
 containerGCTest(f, test)
 }
-func getPods(specs []*testPodSpec) (pods []*api.Pod) {
+func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
 for _, spec := range specs {
 By(fmt.Sprintf("Creating %v containers with restartCount: %v", spec.numContainers, spec.restartCount))
-containers := []api.Container{}
+containers := []v1.Container{}
 for i := 0; i < spec.numContainers; i++ {
-containers = append(containers, api.Container{
+containers = append(containers, v1.Container{
 Image: "gcr.io/google_containers/busybox:1.24",
 Name: spec.getContainerName(i),
 Command: []string{
@@ -299,18 +299,18 @@ func getPods(specs []*testPodSpec) (pods []*api.Pod) {
 while true; do sleep 1; done
 `, i, spec.restartCount+1),
 },
-VolumeMounts: []api.VolumeMount{
+VolumeMounts: []v1.VolumeMount{
 {MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
 },
 })
 }
-pods = append(pods, &api.Pod{
-ObjectMeta: api.ObjectMeta{Name: spec.podName},
-Spec: api.PodSpec{
-RestartPolicy: api.RestartPolicyAlways,
+pods = append(pods, &v1.Pod{
+ObjectMeta: v1.ObjectMeta{Name: spec.podName},
+Spec: v1.PodSpec{
+RestartPolicy: v1.RestartPolicyAlways,
 Containers: containers,
-Volumes: []api.Volume{
-{Name: "test-empty-dir", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
+Volumes: []v1.Volume{
+{Name: "test-empty-dir", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
 },
 },
 })

View File

@@ -17,7 +17,7 @@ limitations under the License.
 package e2e_node
 import (
-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/kubelet/dockertools"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -33,17 +33,17 @@ var _ = framework.KubeDescribe("ImageID", func() {
 f := framework.NewDefaultFramework("image-id-test")
 It("should be set to the manifest digest (from RepoDigests) when available", func() {
-podDesc := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+podDesc := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod-with-repodigest",
 },
-Spec: api.PodSpec{
-Containers: []api.Container{{
+Spec: v1.PodSpec{
+Containers: []v1.Container{{
 Name: "test",
 Image: busyBoxImage,
 Command: []string{"sh"},
 }},
-RestartPolicy: api.RestartPolicyNever,
+RestartPolicy: v1.RestartPolicyNever,
 },
 }

View File

@@ -21,8 +21,8 @@ import (
 "fmt"
 "time"
-"k8s.io/kubernetes/pkg/api"
 apiUnversioned "k8s.io/kubernetes/pkg/api/unversioned"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -39,14 +39,14 @@ var _ = framework.KubeDescribe("Kubelet", func() {
 Context("when scheduling a busybox command in a pod", func() {
 podName := "busybox-scheduling-" + string(uuid.NewUUID())
 It("it should print the output to logs [Conformance]", func() {
-podClient.CreateSync(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+podClient.CreateSync(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: podName,
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
 // Don't restart the Pod since it is expected to exit
-RestartPolicy: api.RestartPolicyNever,
-Containers: []api.Container{
+RestartPolicy: v1.RestartPolicyNever,
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/busybox:1.24",
 Name: podName,
@@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
 })
 Eventually(func() string {
 sinceTime := apiUnversioned.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
-rc, err := podClient.GetLogs(podName, &api.PodLogOptions{SinceTime: &sinceTime}).Stream()
+rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream()
 if err != nil {
 return ""
 }
@@ -73,14 +73,14 @@ var _ = framework.KubeDescribe("Kubelet", func() {
 BeforeEach(func() {
 podName = "bin-false" + string(uuid.NewUUID())
-podClient.Create(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+podClient.Create(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: podName,
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
 // Don't restart the Pod since it is expected to exit
-RestartPolicy: api.RestartPolicyNever,
-Containers: []api.Container{
+RestartPolicy: v1.RestartPolicyNever,
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/busybox:1.24",
 Name: podName,
@@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
 })
 It("should be possible to delete", func() {
-err := podClient.Delete(podName, &api.DeleteOptions{})
+err := podClient.Delete(podName, &v1.DeleteOptions{})
 Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
 })
 })
@@ -120,19 +120,19 @@ var _ = framework.KubeDescribe("Kubelet", func() {
 podName := "busybox-readonly-fs" + string(uuid.NewUUID())
 It("it should not write to root filesystem [Conformance]", func() {
 isReadOnly := true
-podClient.CreateSync(&api.Pod{
-ObjectMeta: api.ObjectMeta{
+podClient.CreateSync(&v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: podName,
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
 // Don't restart the Pod since it is expected to exit
-RestartPolicy: api.RestartPolicyNever,
-Containers: []api.Container{
+RestartPolicy: v1.RestartPolicyNever,
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/busybox:1.24",
 Name: podName,
 Command: []string{"sh", "-c", "echo test > /file; sleep 240"},
-SecurityContext: &api.SecurityContext{
+SecurityContext: &v1.SecurityContext{
 ReadOnlyRootFilesystem: &isReadOnly,
 },
 },
@@ -140,7 +140,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
 },
 })
 Eventually(func() string {
-rc, err := podClient.GetLogs(podName, &api.PodLogOptions{}).Stream()
+rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream()
 if err != nil {
 return ""
 }

View File

@@ -20,7 +20,7 @@ import (
 "fmt"
 "time"
-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/util/intstr"
 "k8s.io/kubernetes/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 Context("when it is exec hook", func() {
 var file string
-testPodWithExecHook := func(podWithHook *api.Pod) {
+testPodWithExecHook := func(podWithHook *v1.Pod) {
 podCheckHook := getExecHookTestPod("pod-check-hook",
 // Wait until the file is created.
 []string{"sh", "-c", fmt.Sprintf("while [ ! -e %s ]; do sleep 1; done", file)},
@@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 podClient.WaitForSuccess(podCheckHook.Name, postStartWaitTimeout)
 }
 By("delete the pod with lifecycle hook")
-podClient.DeleteSync(podWithHook.Name, api.NewDeleteOptions(15), podWaitTimeout)
+podClient.DeleteSync(podWithHook.Name, v1.NewDeleteOptions(15), podWaitTimeout)
 if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
 By("create the hook check pod")
 podClient.Create(podCheckHook)
@@ -84,9 +84,9 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 // Block forever
 []string{"tail", "-f", "/dev/null"},
 )
-podWithHook.Spec.Containers[0].Lifecycle = &api.Lifecycle{
-PostStart: &api.Handler{
-Exec: &api.ExecAction{Command: []string{"touch", file}},
+podWithHook.Spec.Containers[0].Lifecycle = &v1.Lifecycle{
+PostStart: &v1.Handler{
+Exec: &v1.ExecAction{Command: []string{"touch", file}},
 },
 }
 testPodWithExecHook(podWithHook)
@@ -97,9 +97,9 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 // Block forever
 []string{"tail", "-f", "/dev/null"},
 )
-podWithHook.Spec.Containers[0].Lifecycle = &api.Lifecycle{
-PreStop: &api.Handler{
-Exec: &api.ExecAction{Command: []string{"touch", file}},
+podWithHook.Spec.Containers[0].Lifecycle = &v1.Lifecycle{
+PreStop: &v1.Handler{
+Exec: &v1.ExecAction{Command: []string{"touch", file}},
 },
 }
 testPodWithExecHook(podWithHook)
@@ -108,19 +108,19 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 Context("when it is http hook", func() {
 var targetIP string
-podHandleHookRequest := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+podHandleHookRequest := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod-handle-http-request",
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Name: "pod-handle-http-request",
 Image: "gcr.io/google_containers/netexec:1.7",
-Ports: []api.ContainerPort{
+Ports: []v1.ContainerPort{
 {
 ContainerPort: 8080,
-Protocol: api.ProtocolTCP,
+Protocol: v1.ProtocolTCP,
 },
 },
 },
@@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 newPod := podClient.CreateSync(podHandleHookRequest)
 targetIP = newPod.Status.PodIP
 })
-testPodWithHttpHook := func(podWithHook *api.Pod) {
+testPodWithHttpHook := func(podWithHook *v1.Pod) {
 By("create the pod with lifecycle hook")
 podClient.CreateSync(podWithHook)
 if podWithHook.Spec.Containers[0].Lifecycle.PostStart != nil {
@@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 }, postStartWaitTimeout, podCheckInterval).Should(BeNil())
 }
 By("delete the pod with lifecycle hook")
-podClient.DeleteSync(podWithHook.Name, api.NewDeleteOptions(15), podWaitTimeout)
+podClient.DeleteSync(podWithHook.Name, v1.NewDeleteOptions(15), podWaitTimeout)
 if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
 By("check prestop hook")
 Eventually(func() error {
@@ -153,18 +153,18 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 }
 }
 It("should execute poststart http hook properly [Conformance]", func() {
-podWithHook := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+podWithHook := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod-with-poststart-http-hook",
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Name: "pod-with-poststart-http-hook",
 Image: framework.GetPauseImageNameForHostArch(),
-Lifecycle: &api.Lifecycle{
-PostStart: &api.Handler{
-HTTPGet: &api.HTTPGetAction{
+Lifecycle: &v1.Lifecycle{
+PostStart: &v1.Handler{
+HTTPGet: &v1.HTTPGetAction{
 Path: "/echo?msg=poststart",
 Host: targetIP,
 Port: intstr.FromInt(8080),
@@ -178,18 +178,18 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 testPodWithHttpHook(podWithHook)
 })
 It("should execute prestop http hook properly [Conformance]", func() {
-podWithHook := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+podWithHook := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod-with-prestop-http-hook",
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Name: "pod-with-prestop-http-hook",
 Image: framework.GetPauseImageNameForHostArch(),
-Lifecycle: &api.Lifecycle{
-PreStop: &api.Handler{
-HTTPGet: &api.HTTPGetAction{
+Lifecycle: &v1.Lifecycle{
+PreStop: &v1.Handler{
+HTTPGet: &v1.HTTPGetAction{
 Path: "/echo?msg=prestop",
 Host: targetIP,
 Port: intstr.FromInt(8080),
@@ -206,17 +206,17 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 })
 })
-func getExecHookTestPod(name string, cmd []string) *api.Pod {
-return &api.Pod{
-ObjectMeta: api.ObjectMeta{
+func getExecHookTestPod(name string, cmd []string) *v1.Pod {
+return &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: name,
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Name: name,
 Image: "gcr.io/google_containers/busybox:1.24",
-VolumeMounts: []api.VolumeMount{
+VolumeMounts: []v1.VolumeMount{
 {
 Name: "tmpfs",
 MountPath: "/tmp",
@@ -225,11 +225,11 @@ func getExecHookTestPod(name string, cmd []string) *api.Pod {
 Command: cmd,
 },
 },
-RestartPolicy: api.RestartPolicyNever,
-Volumes: []api.Volume{
+RestartPolicy: v1.RestartPolicyNever,
+Volumes: []v1.Volume{
 {
 Name: "tmpfs",
-VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp"}},
+VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/tmp"}},
 },
 },
 },

View File

@@ -17,7 +17,7 @@ limitations under the License.
 package e2e_node
 import (
-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/kubelet"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/util/uuid"
@@ -44,14 +44,14 @@ var _ = framework.KubeDescribe("ContainerLogPath", func() {
 checkPodName := "checker" + string(uuid.NewUUID())
 checkContName := "checker-c-" + string(uuid.NewUUID())
-logPod := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+logPod := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: logPodName,
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
 // this pod is expected to exit successfully
-RestartPolicy: api.RestartPolicyNever,
-Containers: []api.Container{
+RestartPolicy: v1.RestartPolicyNever,
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/busybox:1.24",
 Name: logContName,
@@ -72,21 +72,21 @@ var _ = framework.KubeDescribe("ContainerLogPath", func() {
 expectedlogFile := logDir + "/" + logPodName + "_" + ns + "_" + logContName + "-" + logConID.ID + ".log"
-checkPod := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+checkPod := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: checkPodName,
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
 // this pod is expected to exit successfully
-RestartPolicy: api.RestartPolicyNever,
-Containers: []api.Container{
+RestartPolicy: v1.RestartPolicyNever,
+Containers: []v1.Container{
 {
 Image: "gcr.io/google_containers/busybox:1.24",
 Name: checkContName,
 // If we find expected log file and contains right content, exit 0
 // else, keep checking until test timeout
 Command: []string{"sh", "-c", "while true; do if [ -e " + expectedlogFile + " ] && grep -q " + logString + " " + expectedlogFile + "; then exit 0; fi; sleep 1; done"},
-VolumeMounts: []api.VolumeMount{
+VolumeMounts: []v1.VolumeMount{
 {
 Name: logDirVolumeName,
 // mount ContainerLogsDir to the same path in container
@@ -96,11 +96,11 @@ var _ = framework.KubeDescribe("ContainerLogPath", func() {
 },
 },
 },
-Volumes: []api.Volume{
+Volumes: []v1.Volume{
 {
 Name: logDirVolumeName,
-VolumeSource: api.VolumeSource{
-HostPath: &api.HostPathVolumeSource{
+VolumeSource: v1.VolumeSource{
+HostPath: &v1.HostPathVolumeSource{
 Path: expectedlogFile,
 },
 },

View File

@ -22,8 +22,8 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
// Wait for the memory pressure condition to disappear from the node status before continuing. // Wait for the memory pressure condition to disappear from the node status before continuing.
By("waiting for the memory pressure condition on the node to disappear before ending the test.") By("waiting for the memory pressure condition on the node to disappear before ending the test.")
Eventually(func() error { Eventually(func() error {
nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{}) nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
if err != nil { if err != nil {
return fmt.Errorf("tried to get node list but got error: %v", err) return fmt.Errorf("tried to get node list but got error: %v", err)
} }
@ -59,8 +59,8 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
return fmt.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items) return fmt.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)
} }
node := nodeList.Items[0] node := nodeList.Items[0]
_, pressure := api.GetNodeCondition(&node.Status, api.NodeMemoryPressure) _, pressure := v1.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
if pressure != nil && pressure.Status == api.ConditionTrue { if pressure != nil && pressure.Status == v1.ConditionTrue {
return fmt.Errorf("node is still reporting memory pressure condition: %s", pressure) return fmt.Errorf("node is still reporting memory pressure condition: %s", pressure)
} }
return nil return nil
@ -104,13 +104,13 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
// Finally, try starting a new pod and wait for it to be scheduled and running. // Finally, try starting a new pod and wait for it to be scheduled and running.
// This is the final check to try to prevent interference with subsequent tests. // This is the final check to try to prevent interference with subsequent tests.
podName := "admit-best-effort-pod" podName := "admit-best-effort-pod"
f.PodClient().CreateSync(&api.Pod{ f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: framework.GetPauseImageNameForHostArch(), Image: framework.GetPauseImageNameForHostArch(),
Name: podName, Name: podName,
@ -124,25 +124,25 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
By("creating a guaranteed pod, a burstable pod, and a besteffort pod.") By("creating a guaranteed pod, a burstable pod, and a besteffort pod.")
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
guaranteed := createMemhogPod(f, "guaranteed-", "guaranteed", api.ResourceRequirements{ guaranteed := createMemhogPod(f, "guaranteed-", "guaranteed", v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
"cpu": resource.MustParse("100m"), "cpu": resource.MustParse("100m"),
"memory": resource.MustParse("100Mi"), "memory": resource.MustParse("100Mi"),
}, },
Limits: api.ResourceList{ Limits: v1.ResourceList{
"cpu": resource.MustParse("100m"), "cpu": resource.MustParse("100m"),
"memory": resource.MustParse("100Mi"), "memory": resource.MustParse("100Mi"),
}}) }})
// A pod is burstable if limits and requests do not match across all containers. // A pod is burstable if limits and requests do not match across all containers.
burstable := createMemhogPod(f, "burstable-", "burstable", api.ResourceRequirements{ burstable := createMemhogPod(f, "burstable-", "burstable", v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
"cpu": resource.MustParse("100m"), "cpu": resource.MustParse("100m"),
"memory": resource.MustParse("100Mi"), "memory": resource.MustParse("100Mi"),
}}) }})
// A pod is besteffort if none of its containers have specified any requests or limits. // A pod is besteffort if none of its containers have specified any requests or limits.
besteffort := createMemhogPod(f, "besteffort-", "besteffort", api.ResourceRequirements{}) besteffort := createMemhogPod(f, "besteffort-", "besteffort", v1.ResourceRequirements{})
// We poll until timeout or all pods are killed. // We poll until timeout or all pods are killed.
// Inside the func, we check that all pods are in a valid phase with // Inside the func, we check that all pods are in a valid phase with
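The three comments above give working definitions of the QoS classes the eviction test exercises. A toy classifier that encodes those rules for a single-container pod makes them concrete; qosClass is an illustrative helper, not the kubelet's real QoS logic:

// Sketch only: classify a single-container pod per the definitions above.
func qosClass(res v1.ResourceRequirements) string {
    if len(res.Requests) == 0 && len(res.Limits) == 0 {
        return "BestEffort" // nothing requested, nothing limited
    }
    if len(res.Requests) == len(res.Limits) {
        equal := true
        for name, req := range res.Requests {
            lim, ok := res.Limits[name]
            if !ok || req.Cmp(lim) != 0 {
                equal = false
                break
            }
        }
        if equal {
            return "Guaranteed" // requests and limits present and equal
        }
    }
    return "Burstable" // anything in between
}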
@ -174,7 +174,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
// see the eviction manager reporting a pressure condition for a while without the besteffort failing, // see the eviction manager reporting a pressure condition for a while without the besteffort failing,
// and we see that the manager did in fact evict the besteffort (this should be in the Kubelet log), we // and we see that the manager did in fact evict the besteffort (this should be in the Kubelet log), we
// will have more reason to believe the phase is out of date. // will have more reason to believe the phase is out of date.
nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{}) nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
if err != nil { if err != nil {
glog.Errorf("tried to get node list but got error: %v", err) glog.Errorf("tried to get node list but got error: %v", err)
} }
@ -182,7 +182,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
glog.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items) glog.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)
} }
node := nodeList.Items[0] node := nodeList.Items[0]
_, pressure := api.GetNodeCondition(&node.Status, api.NodeMemoryPressure) _, pressure := v1.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
glog.Infof("node pressure condition: %s", pressure) glog.Infof("node pressure condition: %s", pressure)
// NOTE/TODO(mtaufen): Also log (at least temporarily) the actual memory consumption on the node. // NOTE/TODO(mtaufen): Also log (at least temporarily) the actual memory consumption on the node.
@ -198,15 +198,15 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
} }
if bestPh == api.PodRunning { if bestPh == v1.PodRunning {
Expect(burstPh).NotTo(Equal(api.PodFailed), "burstable pod failed before best effort pod") Expect(burstPh).NotTo(Equal(v1.PodFailed), "burstable pod failed before best effort pod")
Expect(gteedPh).NotTo(Equal(api.PodFailed), "guaranteed pod failed before best effort pod") Expect(gteedPh).NotTo(Equal(v1.PodFailed), "guaranteed pod failed before best effort pod")
} else if burstPh == api.PodRunning { } else if burstPh == v1.PodRunning {
Expect(gteedPh).NotTo(Equal(api.PodFailed), "guaranteed pod failed before burstable pod") Expect(gteedPh).NotTo(Equal(v1.PodFailed), "guaranteed pod failed before burstable pod")
} }
// When both besteffort and burstable have been evicted, the test has completed. // When both besteffort and burstable have been evicted, the test has completed.
if bestPh == api.PodFailed && burstPh == api.PodFailed { if bestPh == v1.PodFailed && burstPh == v1.PodFailed {
return nil return nil
} }
return fmt.Errorf("besteffort and burstable have not yet both been evicted.") return fmt.Errorf("besteffort and burstable have not yet both been evicted.")
@ -219,12 +219,12 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
}) })
func createMemhogPod(f *framework.Framework, genName string, ctnName string, res api.ResourceRequirements) *api.Pod { func createMemhogPod(f *framework.Framework, genName string, ctnName string, res v1.ResourceRequirements) *v1.Pod {
env := []api.EnvVar{ env := []v1.EnvVar{
{ {
Name: "MEMORY_LIMIT", Name: "MEMORY_LIMIT",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory", Resource: "limits.memory",
}, },
}, },
@ -243,13 +243,13 @@ func createMemhogPod(f *framework.Framework, genName string, ctnName string, res
memLimit = "$(MEMORY_LIMIT)" memLimit = "$(MEMORY_LIMIT)"
} }
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
GenerateName: genName, GenerateName: genName,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: ctnName, Name: ctnName,
Image: "gcr.io/google-containers/stress:v1", Image: "gcr.io/google-containers/stress:v1",

View File

@ -23,9 +23,9 @@ import (
"path/filepath" "path/filepath"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -47,7 +47,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
By("create the static pod") By("create the static pod")
err := createStaticPod(manifestPath, staticPodName, ns, err := createStaticPod(manifestPath, staticPodName, ns,
"gcr.io/google_containers/nginx-slim:0.7", api.RestartPolicyAlways) "gcr.io/google_containers/nginx-slim:0.7", v1.RestartPolicyAlways)
Expect(err).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred())
By("wait for the mirror pod to be running") By("wait for the mirror pod to be running")
@ -63,7 +63,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
By("update the static pod container image") By("update the static pod container image")
image := framework.GetPauseImageNameForHostArch() image := framework.GetPauseImageNameForHostArch()
err = createStaticPod(manifestPath, staticPodName, ns, image, api.RestartPolicyAlways) err = createStaticPod(manifestPath, staticPodName, ns, image, v1.RestartPolicyAlways)
Expect(err).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred())
By("wait for the mirror pod to be updated") By("wait for the mirror pod to be updated")
@ -84,7 +84,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
uid := pod.UID uid := pod.UID
By("delete the mirror pod with grace period 30s") By("delete the mirror pod with grace period 30s")
err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(30)) err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, v1.NewDeleteOptions(30))
Expect(err).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred())
By("wait for the mirror pod to be recreated") By("wait for the mirror pod to be recreated")
@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
uid := pod.UID uid := pod.UID
By("delete the mirror pod with grace period 0s") By("delete the mirror pod with grace period 0s")
err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, v1.NewDeleteOptions(0))
Expect(err).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred())
By("wait for the mirror pod to be recreated") By("wait for the mirror pod to be recreated")
@ -124,7 +124,7 @@ func staticPodPath(dir, name, namespace string) string {
return filepath.Join(dir, namespace+"-"+name+".yaml") return filepath.Join(dir, namespace+"-"+name+".yaml")
} }
func createStaticPod(dir, name, namespace, image string, restart api.RestartPolicy) error { func createStaticPod(dir, name, namespace, image string, restart v1.RestartPolicy) error {
template := ` template := `
apiVersion: v1 apiVersion: v1
kind: Pod kind: Pod
@ -168,7 +168,7 @@ func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error
if err != nil { if err != nil {
return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
} }
if pod.Status.Phase != api.PodRunning { if pod.Status.Phase != v1.PodRunning {
return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase) return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase)
} }
return nil return nil
@ -182,7 +182,7 @@ func checkMirrorPodRecreatedAndRunnig(cl clientset.Interface, name, namespace st
if pod.UID == oUID { if pod.UID == oUID {
return fmt.Errorf("expected the uid of mirror pod %q to be changed, got %q", name, pod.UID) return fmt.Errorf("expected the uid of mirror pod %q to be changed, got %q", name, pod.UID)
} }
if pod.Status.Phase != api.PodRunning { if pod.Status.Phase != v1.PodRunning {
return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase) return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase)
} }
return nil return nil
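createStaticPod above renders a manifest into the kubelet's static-pod directory, but the template body is cut off by the hunk boundary. Under the v1 types this commit migrates to, a comparable minimal template would plausibly look like the sketch below; the exact fields of the commit's template are not visible here, so treat this as an assumption:

// Sketch only: a minimal static-pod template of the kind createStaticPod
// writes; the placeholders line up with its name/namespace/image/restart args.
const staticPodTemplate = `
apiVersion: v1
kind: Pod
metadata:
  name: %s
  namespace: %s
spec:
  restartPolicy: %s
  containers:
  - name: test
    image: %s
`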

View File

@ -34,7 +34,7 @@ import (
cadvisorclient "github.com/google/cadvisor/client/v2" cadvisorclient "github.com/google/cadvisor/client/v2"
cadvisorapiv2 "github.com/google/cadvisor/info/v2" cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/procfs" "k8s.io/kubernetes/pkg/util/procfs"
@ -292,30 +292,29 @@ func formatCPUSummary(summary framework.ContainersCPUSummary) string {
} }
// createCadvisorPod creates a standalone cadvisor pod for fine-grain resource monitoring. // createCadvisorPod creates a standalone cadvisor pod for fine-grain resource monitoring.
func getCadvisorPod() *api.Pod { func getCadvisorPod() *v1.Pod {
return &api.Pod{ return &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: cadvisorPodName, Name: cadvisorPodName,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
// It uses a host port for the tests to collect data. // It uses a host port for the tests to collect data.
// Currently we can not use port mapping in test-e2e-node. // Currently we can not use port mapping in test-e2e-node.
SecurityContext: &api.PodSecurityContext{
HostNetwork: true, HostNetwork: true,
}, SecurityContext: &v1.PodSecurityContext{},
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: cadvisorImageName, Image: cadvisorImageName,
Name: cadvisorPodName, Name: cadvisorPodName,
Ports: []api.ContainerPort{ Ports: []v1.ContainerPort{
{ {
Name: "http", Name: "http",
HostPort: cadvisorPort, HostPort: cadvisorPort,
ContainerPort: cadvisorPort, ContainerPort: cadvisorPort,
Protocol: api.ProtocolTCP, Protocol: v1.ProtocolTCP,
}, },
}, },
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "sys", Name: "sys",
ReadOnly: true, ReadOnly: true,
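One substantive change in this hunk, easy to miss among the mechanical renames: in the internal api package HostNetwork lived on PodSecurityContext, whereas in v1 it is a top-level PodSpec field, so the cadvisor pod now sets it directly on the spec and leaves the security context empty. A minimal sketch of the v1 shape:

// Sketch only: host networking moves up to the PodSpec in v1.
func hostNetworkSpec() v1.PodSpec {
    return v1.PodSpec{
        HostNetwork:     true,                     // formerly api.PodSecurityContext.HostNetwork
        SecurityContext: &v1.PodSecurityContext{}, // no longer carries host-namespace flags
    }
}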
@ -344,22 +343,22 @@ func getCadvisorPod() *api.Pod {
}, },
}, },
}, },
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: "rootfs", Name: "rootfs",
VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/"}}, VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/"}},
}, },
{ {
Name: "var-run", Name: "var-run",
VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/var/run"}}, VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/var/run"}},
}, },
{ {
Name: "sys", Name: "sys",
VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/sys"}}, VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/sys"}},
}, },
{ {
Name: "docker", Name: "docker",
VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/var/lib/docker"}}, VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/var/lib/docker"}},
}, },
}, },
}, },
@ -367,14 +366,14 @@ func getCadvisorPod() *api.Pod {
} }
// deletePodsSync deletes a list of pods and block until pods disappear. // deletePodsSync deletes a list of pods and block until pods disappear.
func deletePodsSync(f *framework.Framework, pods []*api.Pod) { func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
var wg sync.WaitGroup var wg sync.WaitGroup
for _, pod := range pods { for _, pod := range pods {
wg.Add(1) wg.Add(1)
go func(pod *api.Pod) { go func(pod *v1.Pod) {
defer wg.Done() defer wg.Done()
err := f.PodClient().Delete(pod.ObjectMeta.Name, api.NewDeleteOptions(30)) err := f.PodClient().Delete(pod.ObjectMeta.Name, v1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
@ -386,8 +385,8 @@ func deletePodsSync(f *framework.Framework, pods []*api.Pod) {
} }
// newTestPods creates a list of pods (specification) for test. // newTestPods creates a list of pods (specification) for test.
func newTestPods(numPods int, imageName, podType string) []*api.Pod { func newTestPods(numPods int, imageName, podType string) []*v1.Pod {
var pods []*api.Pod var pods []*v1.Pod
for i := 0; i < numPods; i++ { for i := 0; i < numPods; i++ {
podName := "test-" + string(uuid.NewUUID()) podName := "test-" + string(uuid.NewUUID())
labels := map[string]string{ labels := map[string]string{
@ -395,14 +394,14 @@ func newTestPods(numPods int, imageName, podType string) []*api.Pod {
"name": podName, "name": podName,
} }
pods = append(pods, pods = append(pods,
&api.Pod{ &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: labels, Labels: labels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
// Restart policy is always (default). // Restart policy is always (default).
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: imageName, Image: imageName,
Name: podName, Name: podName,

View File

@ -23,7 +23,7 @@ import (
"strings" "strings"
"time" "time"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"

View File

@ -19,27 +19,29 @@ limitations under the License.
package e2e_node package e2e_node
import ( import (
"k8s.io/kubernetes/test/e2e/framework"
"time" "time"
"k8s.io/kubernetes/test/e2e/framework"
"fmt" "fmt"
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/pkg/api"
testutils "k8s.io/kubernetes/test/utils"
"os/exec" "os/exec"
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/pkg/api/v1"
testutils "k8s.io/kubernetes/test/utils"
) )
// waitForPods waits up to timeout for pod_count pods to be running. // waitForPods waits up to timeout for pod_count pods to be running.
// If the timeout is hit, it returns the list of currently running pods. // If the timeout is hit, it returns the list of currently running pods.
func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (runningPods []*api.Pod) { func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (runningPods []*v1.Pod) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
podList, err := f.PodClient().List(api.ListOptions{}) podList, err := f.PodClient().List(v1.ListOptions{})
if err != nil { if err != nil {
framework.Logf("Failed to list pods on node: %v", err) framework.Logf("Failed to list pods on node: %v", err)
continue continue
} }
runningPods = []*api.Pod{} runningPods = []*v1.Pod{}
for _, pod := range podList.Items { for _, pod := range podList.Items {
if r, err := testutils.PodRunningReady(&pod); err != nil || !r { if r, err := testutils.PodRunningReady(&pod); err != nil || !r {
continue continue
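A hedged example of how waitForPods is typically called: poll for an expected count and fail the spec if it is not reached within the deadline (podCount and the timeout are illustrative values):

// Sketch only: typical call site for waitForPods.
const podCount = 10
runningPods := waitForPods(f, podCount, 5*time.Minute)
if len(runningPods) < podCount {
    framework.Failf("expected %d running pods, got %d", podCount, len(runningPods))
}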

View File

@ -21,7 +21,7 @@ import (
"path" "path"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -44,34 +44,34 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
It("it should run with the expected status [Conformance]", func() { It("it should run with the expected status [Conformance]", func() {
restartCountVolumeName := "restart-count" restartCountVolumeName := "restart-count"
restartCountVolumePath := "/restart-count" restartCountVolumePath := "/restart-count"
testContainer := api.Container{ testContainer := v1.Container{
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
MountPath: restartCountVolumePath, MountPath: restartCountVolumePath,
Name: restartCountVolumeName, Name: restartCountVolumeName,
}, },
}, },
} }
testVolumes := []api.Volume{ testVolumes := []v1.Volume{
{ {
Name: restartCountVolumeName, Name: restartCountVolumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}, EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
}, },
}, },
} }
testCases := []struct { testCases := []struct {
Name string Name string
RestartPolicy api.RestartPolicy RestartPolicy v1.RestartPolicy
Phase api.PodPhase Phase v1.PodPhase
State ContainerState State ContainerState
RestartCount int32 RestartCount int32
Ready bool Ready bool
}{ }{
{"terminate-cmd-rpa", api.RestartPolicyAlways, api.PodRunning, ContainerStateRunning, 2, true}, {"terminate-cmd-rpa", v1.RestartPolicyAlways, v1.PodRunning, ContainerStateRunning, 2, true},
{"terminate-cmd-rpof", api.RestartPolicyOnFailure, api.PodSucceeded, ContainerStateTerminated, 1, false}, {"terminate-cmd-rpof", v1.RestartPolicyOnFailure, v1.PodSucceeded, ContainerStateTerminated, 1, false},
{"terminate-cmd-rpn", api.RestartPolicyNever, api.PodFailed, ContainerStateTerminated, 0, false}, {"terminate-cmd-rpn", v1.RestartPolicyNever, v1.PodFailed, ContainerStateTerminated, 0, false},
} }
for _, testCase := range testCases { for _, testCase := range testCases {
@ -95,8 +95,8 @@ while true; do sleep 1; done
Container: testContainer, Container: testContainer,
RestartPolicy: testCase.RestartPolicy, RestartPolicy: testCase.RestartPolicy,
Volumes: testVolumes, Volumes: testVolumes,
PodSecurityContext: &api.PodSecurityContext{ PodSecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &api.SELinuxOptions{ SELinuxOptions: &v1.SELinuxOptions{
Level: "s0", Level: "s0",
}, },
}, },
@ -135,17 +135,17 @@ while true; do sleep 1; done
priv := true priv := true
c := ConformanceContainer{ c := ConformanceContainer{
PodClient: f.PodClient(), PodClient: f.PodClient(),
Container: api.Container{ Container: v1.Container{
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Name: name, Name: name,
Command: []string{"/bin/sh", "-c"}, Command: []string{"/bin/sh", "-c"},
Args: []string{fmt.Sprintf("/bin/echo -n %s > %s", terminationMessage, terminationMessagePath)}, Args: []string{fmt.Sprintf("/bin/echo -n %s > %s", terminationMessage, terminationMessagePath)},
TerminationMessagePath: terminationMessagePath, TerminationMessagePath: terminationMessagePath,
SecurityContext: &api.SecurityContext{ SecurityContext: &v1.SecurityContext{
Privileged: &priv, Privileged: &priv,
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
} }
By("create the container") By("create the container")
@ -153,7 +153,7 @@ while true; do sleep 1; done
defer c.Delete() defer c.Delete()
By("wait for the container to succeed") By("wait for the container to succeed")
Eventually(c.GetPhase, retryTimeout, pollInterval).Should(Equal(api.PodSucceeded)) Eventually(c.GetPhase, retryTimeout, pollInterval).Should(Equal(v1.PodSucceeded))
By("get the container status") By("get the container status")
status, err := c.GetStatus() status, err := c.GetStatus()
@ -181,55 +181,55 @@ while true; do sleep 1; done
} }
} }
}` }`
secret := &api.Secret{ secret := &v1.Secret{
Data: map[string][]byte{api.DockerConfigJsonKey: []byte(auth)}, Data: map[string][]byte{v1.DockerConfigJsonKey: []byte(auth)},
Type: api.SecretTypeDockerConfigJson, Type: v1.SecretTypeDockerConfigJson,
} }
// The following images are not added into NodeImageWhiteList, because this test is // The following images are not added into NodeImageWhiteList, because this test is
// testing image pulling, these images don't need to be prepulled. The ImagePullPolicy // testing image pulling, these images don't need to be prepulled. The ImagePullPolicy
// is api.PullAlways, so it won't be blocked by framework image white list check. // is v1.PullAlways, so it won't be blocked by framework image white list check.
for _, testCase := range []struct { for _, testCase := range []struct {
description string description string
image string image string
secret bool secret bool
phase api.PodPhase phase v1.PodPhase
waiting bool waiting bool
}{ }{
{ {
description: "should not be able to pull image from invalid registry", description: "should not be able to pull image from invalid registry",
image: "invalid.com/invalid/alpine:3.1", image: "invalid.com/invalid/alpine:3.1",
phase: api.PodPending, phase: v1.PodPending,
waiting: true, waiting: true,
}, },
{ {
description: "should not be able to pull non-existing image from gcr.io", description: "should not be able to pull non-existing image from gcr.io",
image: "gcr.io/google_containers/invalid-image:invalid-tag", image: "gcr.io/google_containers/invalid-image:invalid-tag",
phase: api.PodPending, phase: v1.PodPending,
waiting: true, waiting: true,
}, },
{ {
description: "should be able to pull image from gcr.io", description: "should be able to pull image from gcr.io",
image: "gcr.io/google_containers/alpine-with-bash:1.0", image: "gcr.io/google_containers/alpine-with-bash:1.0",
phase: api.PodRunning, phase: v1.PodRunning,
waiting: false, waiting: false,
}, },
{ {
description: "should be able to pull image from docker hub", description: "should be able to pull image from docker hub",
image: "alpine:3.1", image: "alpine:3.1",
phase: api.PodRunning, phase: v1.PodRunning,
waiting: false, waiting: false,
}, },
{ {
description: "should not be able to pull from private registry without secret", description: "should not be able to pull from private registry without secret",
image: "gcr.io/authenticated-image-pulling/alpine:3.1", image: "gcr.io/authenticated-image-pulling/alpine:3.1",
phase: api.PodPending, phase: v1.PodPending,
waiting: true, waiting: true,
}, },
{ {
description: "should be able to pull from private registry with secret", description: "should be able to pull from private registry with secret",
image: "gcr.io/authenticated-image-pulling/alpine:3.1", image: "gcr.io/authenticated-image-pulling/alpine:3.1",
secret: true, secret: true,
phase: api.PodRunning, phase: v1.PodRunning,
waiting: false, waiting: false,
}, },
} { } {
@ -239,14 +239,14 @@ while true; do sleep 1; done
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"} command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
container := ConformanceContainer{ container := ConformanceContainer{
PodClient: f.PodClient(), PodClient: f.PodClient(),
Container: api.Container{ Container: v1.Container{
Name: name, Name: name,
Image: testCase.image, Image: testCase.image,
Command: command, Command: command,
// PullAlways makes sure that the image will always be pulled even if it is present before the test. // PullAlways makes sure that the image will always be pulled even if it is present before the test.
ImagePullPolicy: api.PullAlways, ImagePullPolicy: v1.PullAlways,
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
} }
if testCase.secret { if testCase.secret {
secret.Name = "image-pull-secret-" + string(uuid.NewUUID()) secret.Name = "image-pull-secret-" + string(uuid.NewUUID())

View File

@ -19,9 +19,9 @@ package services
import ( import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic" "k8s.io/kubernetes/pkg/client/typed/dynamic"
namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace" namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
@ -57,7 +57,7 @@ func (n *NamespaceController) Start() error {
} }
clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc) clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
gvrFn := client.Discovery().ServerPreferredNamespacedResources gvrFn := client.Discovery().ServerPreferredNamespacedResources
nc := namespacecontroller.NewNamespaceController(client, clientPool, gvrFn, ncResyncPeriod, api.FinalizerKubernetes) nc := namespacecontroller.NewNamespaceController(client, clientPool, gvrFn, ncResyncPeriod, v1.FinalizerKubernetes)
go nc.Run(ncConcurrency, n.stopCh) go nc.Run(ncConcurrency, n.stopCh)
return nil return nil
} }

View File

@ -17,8 +17,8 @@ limitations under the License.
package e2e_node package e2e_node
import ( import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -30,20 +30,20 @@ var _ = framework.KubeDescribe("SimpleMount", func() {
// This is a very simple test that exercises the Kubelet's mounter code path. // This is a very simple test that exercises the Kubelet's mounter code path.
// If the mount fails, the pod will not be able to run, and CreateSync will timeout. // If the mount fails, the pod will not be able to run, and CreateSync will timeout.
It("should be able to mount an emptydir on a container", func() { It("should be able to mount an emptydir on a container", func() {
pod := &api.Pod{ pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: "v1", APIVersion: "v1",
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "simple-mount-pod", Name: "simple-mount-pod",
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "simple-mount-container", Name: "simple-mount-container",
Image: framework.GetPauseImageNameForHostArch(), Image: framework.GetPauseImageNameForHostArch(),
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "simply-mounted-volume", Name: "simply-mounted-volume",
MountPath: "/opt/", MountPath: "/opt/",
@ -51,11 +51,11 @@ var _ = framework.KubeDescribe("SimpleMount", func() {
}, },
}, },
}, },
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: "simply-mounted-volume", Name: "simply-mounted-volume",
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{ EmptyDir: &v1.EmptyDirVolumeSource{
Medium: "Memory", Medium: "Memory",
}, },
}, },
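The string literal "Memory" above is the value of the v1.StorageMediumMemory constant used elsewhere in this commit; the two spellings request the same tmpfs-backed emptyDir:

// Sketch only: equivalent to EmptyDirVolumeSource{Medium: "Memory"}.
func memoryEmptyDir() v1.VolumeSource {
    return v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}
}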

View File

@ -20,9 +20,9 @@ import (
"fmt" "fmt"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -211,39 +211,39 @@ var _ = framework.KubeDescribe("Summary API", func() {
}) })
func createSummaryTestPods(f *framework.Framework, names ...string) { func createSummaryTestPods(f *framework.Framework, names ...string) {
pods := make([]*api.Pod, 0, len(names)) pods := make([]*v1.Pod, 0, len(names))
for _, name := range names { for _, name := range names {
pods = append(pods, &api.Pod{ pods = append(pods, &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: v1.RestartPolicyAlways,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "busybox-container", Name: "busybox-container",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "ping -c 1 google.com; while true; do echo 'hello world' >> /test-empty-dir-mnt/file ; sleep 1; done"}, Command: []string{"sh", "-c", "ping -c 1 google.com; while true; do echo 'hello world' >> /test-empty-dir-mnt/file ; sleep 1; done"},
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Limits: api.ResourceList{ Limits: v1.ResourceList{
// Must set memory limit to get MemoryStats.AvailableBytes // Must set memory limit to get MemoryStats.AvailableBytes
api.ResourceMemory: resource.MustParse("10M"), v1.ResourceMemory: resource.MustParse("10M"),
}, },
}, },
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"}, {MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
}, },
}, },
}, },
SecurityContext: &api.PodSecurityContext{ SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &api.SELinuxOptions{ SELinuxOptions: &v1.SELinuxOptions{
Level: "s0", Level: "s0",
}, },
}, },
Volumes: []api.Volume{ Volumes: []v1.Volume{
// TODO(#28393): Test secret volumes // TODO(#28393): Test secret volumes
// TODO(#28394): Test hostpath volumes // TODO(#28394): Test hostpath volumes
{Name: "test-empty-dir", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, {Name: "test-empty-dir", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
}, },
}, },
}) })
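On the "Must set memory limit" comment above: in the kubelet summary API, a container's MemoryStats.AvailableBytes is only populated when a limit bounds the container, and it is roughly the limit minus the working set. A sketch of that relationship, under the assumption that the stats fields are populated:

// Sketch only: the approximate relation the summary test relies on.
func approxAvailable(m *stats.MemoryStats, limitBytes uint64) uint64 {
    if m.WorkingSetBytes == nil || limitBytes < *m.WorkingSetBytes {
        return 0
    }
    return limitBytes - *m.WorkingSetBytes // ≈ *m.AvailableBytes when a limit is set
}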

View File

@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
k8serr "k8s.io/kubernetes/pkg/api/errors" k8serr "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/apis/componentconfig"
v1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" v1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
@ -84,7 +85,7 @@ func getCurrentKubeletConfig() (*componentconfig.KubeletConfiguration, error) {
} }
// Queries the API server for a Kubelet configuration for the node described by framework.TestContext.NodeName // Queries the API server for a Kubelet configuration for the node described by framework.TestContext.NodeName
func getCurrentKubeletConfigMap(f *framework.Framework) (*api.ConfigMap, error) { func getCurrentKubeletConfigMap(f *framework.Framework) (*v1.ConfigMap, error) {
return f.ClientSet.Core().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", framework.TestContext.NodeName)) return f.ClientSet.Core().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", framework.TestContext.NodeName))
} }
@ -195,15 +196,15 @@ func decodeConfigz(resp *http.Response) (*componentconfig.KubeletConfiguration,
} }
// Constructs a Kubelet ConfigMap targeting the current node running the node e2e tests // Constructs a Kubelet ConfigMap targeting the current node running the node e2e tests
func makeKubeletConfigMap(nodeName string, kubeCfg *componentconfig.KubeletConfiguration) *api.ConfigMap { func makeKubeletConfigMap(nodeName string, kubeCfg *componentconfig.KubeletConfiguration) *v1.ConfigMap {
kubeCfgExt := v1alpha1.KubeletConfiguration{} kubeCfgExt := v1alpha1.KubeletConfiguration{}
api.Scheme.Convert(kubeCfg, &kubeCfgExt, nil) api.Scheme.Convert(kubeCfg, &kubeCfgExt, nil)
bytes, err := json.Marshal(kubeCfgExt) bytes, err := json.Marshal(kubeCfgExt)
framework.ExpectNoError(err) framework.ExpectNoError(err)
cmap := &api.ConfigMap{ cmap := &v1.ConfigMap{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("kubelet-%s", nodeName), Name: fmt.Sprintf("kubelet-%s", nodeName),
}, },
Data: map[string]string{ Data: map[string]string{
@ -214,7 +215,7 @@ func makeKubeletConfigMap(nodeName string, kubeCfg *componentconfig.KubeletConfi
} }
// Uses KubeletConfiguration to create a `kubelet-<node-name>` ConfigMap in the "kube-system" namespace. // Uses KubeletConfiguration to create a `kubelet-<node-name>` ConfigMap in the "kube-system" namespace.
func createConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) (*api.ConfigMap, error) { func createConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) (*v1.ConfigMap, error) {
cmap := makeKubeletConfigMap(framework.TestContext.NodeName, kubeCfg) cmap := makeKubeletConfigMap(framework.TestContext.NodeName, kubeCfg)
cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Create(cmap) cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Create(cmap)
if err != nil { if err != nil {
@ -224,7 +225,7 @@ func createConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletCon
} }
// Similar to createConfigMap, except this updates an existing ConfigMap. // Similar to createConfigMap, except this updates an existing ConfigMap.
func updateConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) (*api.ConfigMap, error) { func updateConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) (*v1.ConfigMap, error) {
cmap := makeKubeletConfigMap(framework.TestContext.NodeName, kubeCfg) cmap := makeKubeletConfigMap(framework.TestContext.NodeName, kubeCfg)
cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Update(cmap) cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Update(cmap)
if err != nil { if err != nil {
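Taken together, the helpers above support a read-modify-write flow for the node's kubelet configuration. A hedged sketch of that flow; the MaxPods mutation is purely illustrative, and error handling follows the framework's ExpectNoError convention:

// Sketch only: fetch the live config, tweak one field, push it back.
func bumpMaxPods(f *framework.Framework) {
    kubeCfg, err := getCurrentKubeletConfig()
    framework.ExpectNoError(err)
    kubeCfg.MaxPods = 42 // illustrative mutation
    _, err = updateConfigMap(f, kubeCfg)
    framework.ExpectNoError(err)
}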

View File

@ -19,7 +19,7 @@ package e2e_node
import ( import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -35,24 +35,24 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
Context("On terminatation of pod with memory backed volume", func() { Context("On terminatation of pod with memory backed volume", func() {
It("should remove the volume from the node", func() { It("should remove the volume from the node", func() {
var ( var (
memoryBackedPod *api.Pod memoryBackedPod *v1.Pod
volumeName string volumeName string
) )
By("Creating a pod with a memory backed volume that exits success without restart", func() { By("Creating a pod with a memory backed volume that exits success without restart", func() {
volumeName = "memory-volume" volumeName = "memory-volume"
memoryBackedPod = f.PodClient().Create(&api.Pod{ memoryBackedPod = f.PodClient().Create(&v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()), Name: "pod" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Name: "container" + string(uuid.NewUUID()), Name: "container" + string(uuid.NewUUID()),
Command: []string{"sh", "-c", "echo"}, Command: []string{"sh", "-c", "echo"},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
MountPath: "/tmp", MountPath: "/tmp",
@ -60,11 +60,11 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
}, },
}, },
}, },
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}, EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
}, },
}, },
}, },
@ -79,19 +79,19 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
// need to create a new verification pod on each pass since updates // need to create a new verification pod on each pass since updates
//to the HostPath volume aren't propagated to the pod //to the HostPath volume aren't propagated to the pod
pod := f.PodClient().Create(&api.Pod{ pod := f.PodClient().Create(&v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()), Name: "pod" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Name: "container" + string(uuid.NewUUID()), Name: "container" + string(uuid.NewUUID()),
Command: []string{"sh", "-c", "if [ -d " + volumePath + " ]; then exit 1; fi;"}, Command: []string{"sh", "-c", "if [ -d " + volumePath + " ]; then exit 1; fi;"},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "kubelet-pods", Name: "kubelet-pods",
MountPath: "/tmp", MountPath: "/tmp",
@ -99,13 +99,13 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
}, },
}, },
}, },
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: "kubelet-pods", Name: "kubelet-pods",
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
// TODO: remove hardcoded kubelet volume directory path // TODO: remove hardcoded kubelet volume directory path
// framework.TestContext.KubeVolumeDir is currently not populated for node e2e // framework.TestContext.KubeVolumeDir is currently not populated for node e2e
HostPath: &api.HostPathVolumeSource{Path: "/var/lib/kubelet/pods"}, HostPath: &v1.HostPathVolumeSource{Path: "/var/lib/kubelet/pods"},
}, },
}, },
}, },
@ -113,7 +113,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
}) })
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
gp := int64(1) gp := int64(1)
f.PodClient().Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp}) f.PodClient().Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})
if err == nil { if err == nil {
break break
} }