add import-alias for k8s.io/api/core/v1

Aaron Crickenberger 2019-06-20 11:40:15 -07:00
parent f3fd3cff8e
commit b8d0ce4019
23 changed files with 305 additions and 304 deletions
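
The change itself is mechanical: every file that imported k8s.io/api/core/v1 under an ad-hoc alias (apiv1, corev1, api, api_v1) is rewritten to use the canonical alias v1, and every qualified reference (apiv1.Pod, corev1.Container, ...) is renamed to match. A rewrite like this can be scripted against the Go AST. The sketch below is a hypothetical illustration using only the standard library; it is not the tooling used to produce this commit, it assumes a single source file passed as os.Args[1], and it deliberately ignores shadowing edge cases.

package main

import (
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"os"
	"strconv"
)

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, os.Args[1], nil, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Find the core/v1 import, remember its old alias, and swap in "v1".
	oldAlias := ""
	for _, imp := range file.Imports {
		path, _ := strconv.Unquote(imp.Path.Value)
		if path == "k8s.io/api/core/v1" && imp.Name != nil && imp.Name.Name != "v1" {
			oldAlias = imp.Name.Name
			imp.Name.Name = "v1"
		}
	}
	if oldAlias == "" {
		return // nothing to do
	}

	// Rewrite qualified references such as apiv1.Pod -> v1.Pod.
	ast.Inspect(file, func(n ast.Node) bool {
		if sel, ok := n.(*ast.SelectorExpr); ok {
			if id, ok := sel.X.(*ast.Ident); ok && id.Name == oldAlias {
				id.Name = "v1"
			}
		}
		return true
	})

	// Print the rewritten file to stdout.
	if err := format.Node(os.Stdout, fset, file); err != nil {
		panic(err)
	}
}

A production tool would instead load packages with full type information, so that only identifiers actually bound to the import (and not shadowing locals) get renamed.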


@ -14,5 +14,6 @@
"k8s.io/api/batch/v1beta1": "batchv1beta1",
"k8s.io/api/certificates/v1beta1": "certificatesv1beta1",
"k8s.io/api/coordination/v1": "coordinationv1",
"k8s.io/api/coordination/v1beta1": "coordinationv1beta1"
"k8s.io/api/coordination/v1beta1": "coordinationv1beta1",
"k8s.io/api/core/v1": "v1"
}
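
Each key in this mapping is an import path and each value is the alias that files must use when importing it; the new entry pins k8s.io/api/core/v1 to the alias v1, which matches its package name. For illustration, a minimal file that complies with the updated mapping (the pause image tag here is an assumption, not taken from the commit):

package example

import (
	v1 "k8s.io/api/core/v1" // alias mandated by the mapping above
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newPausePod shows the mandated aliases in use.
func newPausePod(name string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "pause",
				Image: "k8s.gcr.io/pause:3.1", // illustrative tag
			}},
		},
	}
}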


@ -23,7 +23,7 @@ import (
"time"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
@ -68,18 +68,18 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
})
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch pods.", func() {
pod := &apiv1.Pod{
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-pod",
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
Image: imageutils.GetPauseImageName(),
}},
},
}
updatePod := func(pod *apiv1.Pod) {}
updatePod := func(pod *v1.Pod) {}
f.PodClient().CreateSync(pod)
@ -329,7 +329,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
})
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch configmaps.", func() {
configMap := &apiv1.ConfigMap{
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-configmap",
},
@ -462,7 +462,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
})
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch secrets.", func() {
secret := &apiv1.Secret{
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-secret",
},


@ -24,7 +24,7 @@ import (
"github.com/onsi/ginkgo"
auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@ -59,26 +59,26 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
anonymousClient, err := clientset.NewForConfig(config)
framework.ExpectNoError(err, "failed to create the anonymous client")
_, err = f.ClientSet.CoreV1().Namespaces().Create(&apiv1.Namespace{
_, err = f.ClientSet.CoreV1().Namespaces().Create(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "audit",
},
})
framework.ExpectNoError(err, "failed to create namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace).Create(&apiv1.Pod{
_, err = f.ClientSet.CoreV1().Pods(namespace).Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-proxy",
Labels: map[string]string{
"app": "audit",
},
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "proxy",
Image: imageutils.GetE2EImage(imageutils.AuditProxy),
Ports: []apiv1.ContainerPort{
Ports: []v1.ContainerPort{
{
ContainerPort: 8080,
},
@ -89,12 +89,12 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
})
framework.ExpectNoError(err, "failed to create proxy pod")
_, err = f.ClientSet.CoreV1().Services(namespace).Create(&apiv1.Service{
_, err = f.ClientSet.CoreV1().Services(namespace).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "audit",
},
Spec: apiv1.ServiceSpec{
Ports: []apiv1.ServicePort{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Port: 80,
TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: 8080},
@ -182,18 +182,18 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
// https://github.com/kubernetes/kubernetes/issues/70818
{
func() {
pod := &apiv1.Pod{
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-pod",
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
Image: imageutils.GetPauseImageName(),
}},
},
}
updatePod := func(pod *apiv1.Pod) {}
updatePod := func(pod *v1.Pod) {}
f.PodClient().CreateSync(pod)


@ -24,7 +24,7 @@ import (
gcm "google.golang.org/api/monitoring/v3"
appsv1 "k8s.io/api/apps/v1"
as "k8s.io/api/autoscaling/v2beta1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@ -222,7 +222,7 @@ type CustomMetricTestCase struct {
hpa *as.HorizontalPodAutoscaler
kubeClient clientset.Interface
deployment *appsv1.Deployment
pod *corev1.Pod
pod *v1.Pod
initialReplicas int
scaledReplicas int
}
@ -285,7 +285,7 @@ func (tc *CustomMetricTestCase) Run() {
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas)
}
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *corev1.Pod) error {
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) error {
if deployment != nil {
_, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment)
if err != nil {
@ -301,7 +301,7 @@ func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, dep
return nil
}
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *corev1.Pod) {
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) {
if deployment != nil {
_ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{})
}


@ -19,7 +19,7 @@ package common
import (
"fmt"
api "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/security/apparmor"
@ -58,7 +58,7 @@ func LoadAppArmorProfiles(f *framework.Framework) {
// CreateAppArmorTestPod creates a pod that tests apparmor profile enforcement. The pod exits with
// an error code if the profile is incorrectly enforced. If runOnce is true the pod will exit after
// a single test, otherwise it will repeat the test every 1 second until failure.
func CreateAppArmorTestPod(f *framework.Framework, unconfined bool, runOnce bool) *api.Pod {
func CreateAppArmorTestPod(f *framework.Framework, unconfined bool, runOnce bool) *v1.Pod {
profile := "localhost/" + appArmorProfilePrefix + f.Namespace.Name
testCmd := fmt.Sprintf(`
if touch %[1]s; then
@ -92,9 +92,9 @@ sleep 1
done`, testCmd)
}
loaderAffinity := &api.Affinity{
PodAffinity: &api.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{{
loaderAffinity := &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
Namespaces: []string{f.Namespace.Name},
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{loaderLabelKey: loaderLabelValue},
@ -104,7 +104,7 @@ done`, testCmd)
},
}
pod := &api.Pod{
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-apparmor-",
Annotations: map[string]string{
@ -114,14 +114,14 @@ done`, testCmd)
"test": "apparmor",
},
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
Affinity: loaderAffinity,
Containers: []api.Container{{
Containers: []v1.Container{{
Name: "test",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", testCmd},
}},
RestartPolicy: api.RestartPolicyNever,
RestartPolicy: v1.RestartPolicyNever,
},
}
@ -157,7 +157,7 @@ profile %s flags=(attach_disconnected) {
}
`, profileName, appArmorDeniedPath, appArmorAllowedPath)
cm := &api.ConfigMap{
cm := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "apparmor-profiles",
Namespace: f.Namespace.Name,
@ -173,26 +173,26 @@ profile %s flags=(attach_disconnected) {
func createAppArmorProfileLoader(f *framework.Framework) {
True := true
One := int32(1)
loader := &api.ReplicationController{
loader := &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "apparmor-loader",
Namespace: f.Namespace.Name,
},
Spec: api.ReplicationControllerSpec{
Spec: v1.ReplicationControllerSpec{
Replicas: &One,
Template: &api.PodTemplateSpec{
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{loaderLabelKey: loaderLabelValue},
},
Spec: api.PodSpec{
Containers: []api.Container{{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "apparmor-loader",
Image: imageutils.GetE2EImage(imageutils.AppArmorLoader),
Args: []string{"-poll", "10s", "/profiles"},
SecurityContext: &api.SecurityContext{
SecurityContext: &v1.SecurityContext{
Privileged: &True,
},
VolumeMounts: []api.VolumeMount{{
VolumeMounts: []v1.VolumeMount{{
Name: "sys",
MountPath: "/sys",
ReadOnly: true,
@ -206,25 +206,25 @@ func createAppArmorProfileLoader(f *framework.Framework) {
ReadOnly: true,
}},
}},
Volumes: []api.Volume{{
Volumes: []v1.Volume{{
Name: "sys",
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/sys",
},
},
}, {
Name: "apparmor-includes",
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/apparmor.d",
},
},
}, {
Name: "profiles",
VolumeSource: api.VolumeSource{
ConfigMap: &api.ConfigMapVolumeSource{
LocalObjectReference: api.LocalObjectReference{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: "apparmor-profiles",
},
},
@ -241,7 +241,7 @@ func createAppArmorProfileLoader(f *framework.Framework) {
getRunningLoaderPod(f)
}
func getRunningLoaderPod(f *framework.Framework) *api.Pod {
func getRunningLoaderPod(f *framework.Framework) *v1.Pod {
label := labels.SelectorFromSet(labels.Set(map[string]string{loaderLabelKey: loaderLabelValue}))
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)
framework.ExpectNoError(err, "Failed to schedule apparmor-loader Pod")


@ -21,7 +21,7 @@ import (
"time"
coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
@ -48,7 +48,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
ginkgo.Context("when the NodeLease feature is enabled", func() {
ginkgo.It("the kubelet should create and update a lease in the kube-node-lease namespace", func() {
leaseClient := f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
leaseClient := f.ClientSet.CoordinationV1beta1().Leases(v1.NamespaceNodeLease)
var (
err error
lease *coordinationv1beta1.Lease
@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
var err error
var lease *coordinationv1beta1.Lease
gomega.Eventually(func() error {
lease, err = f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
lease, err = f.ClientSet.CoordinationV1beta1().Leases(v1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
@ -154,17 +154,17 @@ var _ = framework.KubeDescribe("NodeLease", func() {
// run controller manager, i.e., no node lifecycle controller.
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
gomega.Expect(err).To(gomega.BeNil())
_, readyCondition := testutils.GetNodeCondition(&node.Status, corev1.NodeReady)
gomega.Expect(readyCondition.Status).To(gomega.Equal(corev1.ConditionTrue))
_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady)
gomega.Expect(readyCondition.Status).To(gomega.Equal(v1.ConditionTrue))
})
})
})
func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, corev1.NodeStatus) {
func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) {
node, err := clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
gomega.Expect(err).To(gomega.BeNil())
_, readyCondition := testutils.GetNodeCondition(&node.Status, corev1.NodeReady)
gomega.Expect(readyCondition.Status).To(gomega.Equal(corev1.ConditionTrue))
_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady)
gomega.Expect(readyCondition.Status).To(gomega.Equal(v1.ConditionTrue))
heartbeatTime := readyCondition.LastHeartbeatTime.Time
readyCondition.LastHeartbeatTime = metav1.Time{}
return heartbeatTime, node.Status


@ -20,7 +20,7 @@ import (
"fmt"
"sync"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@ -54,7 +54,7 @@ func privilegedPSP(name string) *policy.PodSecurityPolicy {
Spec: policy.PodSecurityPolicySpec{
Privileged: true,
AllowPrivilegeEscalation: &allowPrivilegeEscalation,
AllowedCapabilities: []corev1.Capability{"*"},
AllowedCapabilities: []v1.Capability{"*"},
Volumes: []policy.FSType{policy.All},
HostNetwork: true,
HostPorts: []policy.HostPortRange{{Min: 0, Max: 65535}},


@ -19,7 +19,7 @@ package utils
import (
"fmt"
api_v1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
api "k8s.io/kubernetes/pkg/apis/core"
@ -85,7 +85,7 @@ func EnsureLoggingAgentRestartsCount(f *framework.Framework, appName string, max
return nil
}
func getLoggingAgentPods(f *framework.Framework, appName string) (*api_v1.PodList, error) {
func getLoggingAgentPods(f *framework.Framework, appName string) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": appName}))
options := meta_v1.ListOptions{LabelSelector: label.String()}
return f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)


@ -22,7 +22,7 @@ import (
"fmt"
api_v1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
@ -96,23 +96,23 @@ func (p *loadLoggingPod) Name() string {
func (p *loadLoggingPod) Start(f *framework.Framework) error {
e2elog.Logf("Starting load logging pod %s", p.name)
f.PodClient().Create(&api_v1.Pod{
f.PodClient().Create(&v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Name: p.name,
},
Spec: api_v1.PodSpec{
RestartPolicy: api_v1.RestartPolicyNever,
Containers: []api_v1.Container{
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: loggingContainerName,
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{"logs-generator", "-log-lines-total", strconv.Itoa(p.expectedLinesCount), "-run-duration", p.runDuration.String()},
Resources: api_v1.ResourceRequirements{
Requests: api_v1.ResourceList{
api_v1.ResourceCPU: *resource.NewMilliQuantity(
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
loggingContainerCPURequest,
resource.DecimalSI),
api_v1.ResourceMemory: *resource.NewQuantity(
v1.ResourceMemory: *resource.NewQuantity(
loggingContainerMemoryRequest,
resource.BinarySI),
},
@ -162,22 +162,22 @@ func (p *execLoggingPod) Name() string {
func (p *execLoggingPod) Start(f *framework.Framework) error {
e2elog.Logf("Starting repeating logging pod %s", p.name)
f.PodClient().Create(&api_v1.Pod{
f.PodClient().Create(&v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Name: p.name,
},
Spec: api_v1.PodSpec{
Containers: []api_v1.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: loggingContainerName,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: p.cmd,
Resources: api_v1.ResourceRequirements{
Requests: api_v1.ResourceList{
api_v1.ResourceCPU: *resource.NewMilliQuantity(
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
loggingContainerCPURequest,
resource.DecimalSI),
api_v1.ResourceMemory: *resource.NewQuantity(
v1.ResourceMemory: *resource.NewQuantity(
loggingContainerMemoryRequest,
resource.BinarySI),
},


@ -23,7 +23,7 @@ import (
gcm "google.golang.org/api/monitoring/v3"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
@ -99,7 +99,7 @@ func SimpleStackdriverExporterDeployment(name, namespace string, replicas int32,
// is exposed by a different container in one pod.
// The metric names and values are configured via the containers parameter.
func StackdriverExporterDeployment(name, namespace string, replicas int32, containers []CustomMetricContainerSpec) *appsv1.Deployment {
podSpec := corev1.PodSpec{Containers: []corev1.Container{}}
podSpec := v1.PodSpec{Containers: []v1.Container{}}
for _, containerSpec := range containers {
podSpec.Containers = append(podSpec.Containers, stackdriverExporterContainerSpec(containerSpec.Name, namespace, containerSpec.MetricName, containerSpec.MetricValue))
}
@ -113,7 +113,7 @@ func StackdriverExporterDeployment(name, namespace string, replicas int32, conta
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": name},
},
Template: corev1.PodTemplateSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": name,
@ -128,8 +128,8 @@ func StackdriverExporterDeployment(name, namespace string, replicas int32, conta
// StackdriverExporterPod is a Pod running a simple application that exports a metric of fixed value to
// Stackdriver in a loop.
func StackdriverExporterPod(podName, namespace, podLabel, metricName string, metricValue int64) *corev1.Pod {
return &corev1.Pod{
func StackdriverExporterPod(podName, namespace, podLabel, metricName string, metricValue int64) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: namespace,
@ -137,17 +137,17 @@ func StackdriverExporterPod(podName, namespace, podLabel, metricName string, met
"name": podLabel,
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{stackdriverExporterContainerSpec(StackdriverExporter, namespace, metricName, metricValue)},
Spec: v1.PodSpec{
Containers: []v1.Container{stackdriverExporterContainerSpec(StackdriverExporter, namespace, metricName, metricValue)},
},
}
}
func stackdriverExporterContainerSpec(name string, namespace string, metricName string, metricValue int64) corev1.Container {
return corev1.Container{
func stackdriverExporterContainerSpec(name string, namespace string, metricName string, metricValue int64) v1.Container {
return v1.Container{
Name: name,
Image: imageutils.GetE2EImage(imageutils.SdDummyExporter),
ImagePullPolicy: corev1.PullPolicy("Always"),
ImagePullPolicy: v1.PullPolicy("Always"),
Command: []string{
"/bin/sh",
"-c",
@ -162,25 +162,25 @@ func stackdriverExporterContainerSpec(name string, namespace string, metricName
"--use-new-resource-model",
}, " "),
},
Env: []corev1.EnvVar{
Env: []v1.EnvVar{
{
Name: "POD_ID",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.uid",
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
},
Ports: []corev1.ContainerPort{{ContainerPort: 80}},
Ports: []v1.ContainerPort{{ContainerPort: 80}},
}
}
@ -197,7 +197,7 @@ func PrometheusExporterDeployment(name, namespace string, replicas int32, metric
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": name},
},
Template: corev1.PodTemplateSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": name,
@ -210,36 +210,36 @@ func PrometheusExporterDeployment(name, namespace string, replicas int32, metric
}
}
func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) corev1.PodSpec {
return corev1.PodSpec{
Containers: []corev1.Container{
func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{
{
Name: "prometheus-exporter",
Image: imageutils.GetE2EImage(imageutils.PrometheusDummyExporter),
ImagePullPolicy: corev1.PullPolicy("Always"),
ImagePullPolicy: v1.PullPolicy("Always"),
Command: []string{"/prometheus_dummy_exporter", "--metric-name=" + metricName,
fmt.Sprintf("--metric-value=%v", metricValue), fmt.Sprintf("=--port=%d", port)},
Ports: []corev1.ContainerPort{{ContainerPort: port}},
Ports: []v1.ContainerPort{{ContainerPort: port}},
},
{
Name: "prometheus-to-sd",
Image: imageutils.GetE2EImage(imageutils.PrometheusToSd),
ImagePullPolicy: corev1.PullPolicy("Always"),
ImagePullPolicy: v1.PullPolicy("Always"),
Command: []string{"/monitor", fmt.Sprintf("--source=:http://localhost:%d", port),
"--stackdriver-prefix=custom.googleapis.com", "--pod-id=$(POD_ID)", "--namespace-id=$(POD_NAMESPACE)"},
Env: []corev1.EnvVar{
Env: []v1.EnvVar{
{
Name: "POD_ID",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.uid",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},


@ -21,7 +21,7 @@ import (
"strings"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@ -99,7 +99,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
})
ginkgo.It("node lease should be deleted when corresponding node is deleted", func() {
leaseClient := c.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
leaseClient := c.CoordinationV1beta1().Leases(v1.NamespaceNodeLease)
err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
gomega.Expect(err).To(gomega.BeNil())


@ -24,7 +24,7 @@ import (
"k8s.io/client-go/tools/cache"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
schedulerapi "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@ -52,7 +52,7 @@ type priorityPair struct {
var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
var cs clientset.Interface
var nodeList *corev1.NodeList
var nodeList *v1.NodeList
var ns string
f := framework.NewDefaultFramework("sched-preemption")
@ -75,7 +75,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &corev1.NodeList{}
nodeList = &v1.NodeList{}
for _, pair := range priorityPairs {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value})
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true))
@ -92,10 +92,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// enough resources is found, scheduler preempts a lower priority pod to schedule
// the high priority pod.
ginkgo.It("validates basic preemption works", func() {
var podRes corev1.ResourceList
var podRes v1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
ginkgo.By("Create pods that use 60% of node resources.")
pods := make([]*corev1.Pod, len(nodeList.Items))
pods := make([]*v1.Pod, len(nodeList.Items))
for i, node := range nodeList.Items {
cpuAllocatable, found := node.Status.Allocatable["cpu"]
gomega.Expect(found).To(gomega.Equal(true))
@ -103,9 +103,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
memAllocatable, found := node.Status.Allocatable["memory"]
gomega.Expect(found).To(gomega.Equal(true))
memory := memAllocatable.Value() * 60 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
// make the first pod low priority and the rest medium priority.
priorityName := mediumPriorityClassName
@ -115,7 +115,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &corev1.ResourceRequirements{
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
@ -131,7 +131,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
runPausePod(f, pausePodConfig{
Name: "preemptor-pod",
PriorityClassName: highPriorityClassName,
Resources: &corev1.ResourceRequirements{
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
@ -152,10 +152,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// enough resources is found, scheduler preempts a lower priority pod to schedule
// this critical pod.
ginkgo.It("validates lower priority pod preemption by critical pod", func() {
var podRes corev1.ResourceList
var podRes v1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
ginkgo.By("Create pods that use 60% of node resources.")
pods := make([]*corev1.Pod, len(nodeList.Items))
pods := make([]*v1.Pod, len(nodeList.Items))
for i, node := range nodeList.Items {
cpuAllocatable, found := node.Status.Allocatable["cpu"]
gomega.Expect(found).To(gomega.Equal(true))
@ -163,9 +163,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
memAllocatable, found := node.Status.Allocatable["memory"]
gomega.Expect(found).To(gomega.Equal(true))
memory := memAllocatable.Value() * 60 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
// make the first pod low priority and the rest medium priority.
priorityName := mediumPriorityClassName
@ -175,7 +175,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &corev1.ResourceRequirements{
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
@ -192,7 +192,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
Name: "critical-pod",
Namespace: metav1.NamespaceSystem,
PriorityClassName: scheduling.SystemClusterCritical,
Resources: &corev1.ResourceRequirements{
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
@ -220,14 +220,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// It also verifies that existing low priority pods are not preempted as their
// preemption wouldn't help.
ginkgo.It("validates pod anti-affinity works in preemption", func() {
var podRes corev1.ResourceList
var podRes v1.ResourceList
// Create a few pods that use a small amount of resources.
ginkgo.By("Create pods that use 10% of node resources.")
numPods := 4
if len(nodeList.Items) < numPods {
numPods = len(nodeList.Items)
}
pods := make([]*corev1.Pod, numPods)
pods := make([]*v1.Pod, numPods)
for i := 0; i < numPods; i++ {
node := nodeList.Items[i]
cpuAllocatable, found := node.Status.Allocatable["cpu"]
@ -236,9 +236,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
memAllocatable, found := node.Status.Allocatable["memory"]
gomega.Expect(found).To(gomega.BeTrue())
memory := memAllocatable.Value() * 10 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
// Apply node label to each node
framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
@ -252,12 +252,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &corev1.ResourceRequirements{
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
Affinity: &corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
@ -272,14 +272,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
},
},
},
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []corev1.NodeSelectorRequirement{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node",
Operator: corev1.NodeSelectorOpIn,
Operator: v1.NodeSelectorOpIn,
Values: []string{node.Name},
},
},
@ -308,15 +308,15 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
Name: "preemptor-pod",
PriorityClassName: highPriorityClassName,
Labels: map[string]string{"service": "blah"},
Affinity: &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []corev1.NodeSelectorRequirement{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node",
Operator: corev1.NodeSelectorOpIn,
Operator: v1.NodeSelectorOpIn,
Values: []string{nodeList.Items[0].Name},
},
},
@ -379,11 +379,11 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {
// construct a fakecpu resource so that we can set it in the status of the Node object;
// if we instead updated CPU/Memory/etc, those values would be corrected back by the kubelet
var fakecpu corev1.ResourceName = "example.com/fakecpu"
var fakecpu v1.ResourceName = "example.com/fakecpu"
var _ = SIGDescribe("PreemptionExecutionPath", func() {
var cs clientset.Interface
var node *corev1.Node
var node *v1.Node
var ns, nodeHostNameLabel string
f := framework.NewDefaultFramework("sched-preemption-path")
@ -475,11 +475,11 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
return f.ClientSet.CoreV1().Pods(ns).Watch(options)
},
},
&corev1.Pod{},
&v1.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if pod, ok := obj.(*corev1.Pod); ok {
if pod, ok := obj.(*v1.Pod); ok {
podNamesSeen[pod.Name] = struct{}{}
}
},
@ -498,9 +498,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
Labels: map[string]string{"name": "pod1"},
PriorityClassName: "p1",
NodeSelector: map[string]string{"kubernetes.io/hostname": nodeHostNameLabel},
Resources: &corev1.ResourceRequirements{
Requests: corev1.ResourceList{fakecpu: resource.MustParse("40")},
Limits: corev1.ResourceList{fakecpu: resource.MustParse("40")},
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakecpu: resource.MustParse("40")},
Limits: v1.ResourceList{fakecpu: resource.MustParse("40")},
},
},
},
@ -512,9 +512,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
Labels: map[string]string{"name": "pod2"},
PriorityClassName: "p2",
NodeSelector: map[string]string{"kubernetes.io/hostname": nodeHostNameLabel},
Resources: &corev1.ResourceRequirements{
Requests: corev1.ResourceList{fakecpu: resource.MustParse("50")},
Limits: corev1.ResourceList{fakecpu: resource.MustParse("50")},
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakecpu: resource.MustParse("50")},
Limits: v1.ResourceList{fakecpu: resource.MustParse("50")},
},
},
},
@ -526,9 +526,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
Labels: map[string]string{"name": "pod3"},
PriorityClassName: "p3",
NodeSelector: map[string]string{"kubernetes.io/hostname": nodeHostNameLabel},
Resources: &corev1.ResourceRequirements{
Requests: corev1.ResourceList{fakecpu: resource.MustParse("95")},
Limits: corev1.ResourceList{fakecpu: resource.MustParse("95")},
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakecpu: resource.MustParse("95")},
Limits: v1.ResourceList{fakecpu: resource.MustParse("95")},
},
},
},
@ -540,9 +540,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
Labels: map[string]string{"name": "pod4"},
PriorityClassName: "p4",
NodeSelector: map[string]string{"kubernetes.io/hostname": nodeHostNameLabel},
Resources: &corev1.ResourceRequirements{
Requests: corev1.ResourceList{fakecpu: resource.MustParse("400")},
Limits: corev1.ResourceList{fakecpu: resource.MustParse("400")},
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakecpu: resource.MustParse("400")},
Limits: v1.ResourceList{fakecpu: resource.MustParse("400")},
},
},
},
@ -604,7 +604,7 @@ func initPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet
Selector: &metav1.LabelSelector{
MatchLabels: pausePod.Labels,
},
Template: corev1.PodTemplateSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{Labels: pausePod.ObjectMeta.Labels},
Spec: pausePod.Spec,
},


@ -17,7 +17,7 @@ limitations under the License.
package upgrades
import (
api "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
@ -30,7 +30,7 @@ import (
// AppArmorUpgradeTest tests that AppArmor profiles are enforced & usable across upgrades.
type AppArmorUpgradeTest struct {
pod *api.Pod
pod *v1.Pod
}
// Name returns the tracking name of the test.
@ -87,7 +87,7 @@ func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Should be able to get pod")
gomega.Expect(pod.Status.Phase).To(gomega.Equal(api.PodRunning), "Pod should stay running")
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning), "Pod should stay running")
gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
}
@ -111,5 +111,5 @@ func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework)
}
func conditionType(condition interface{}) string {
return string(condition.(api.NodeCondition).Type)
return string(condition.(v1.NodeCondition).Type)
}


@ -21,7 +21,7 @@ import (
"strings"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -46,12 +46,12 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA [Slow]", func(
container2Name := "container2"
container2Domain := "contoso.org"
pod := &corev1.Pod{
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: container1Name,
Image: imageutils.GetPauseImageName(),
@ -59,15 +59,15 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA [Slow]", func(
{
Name: container2Name,
Image: imageutils.GetPauseImageName(),
SecurityContext: &corev1.SecurityContext{
WindowsOptions: &corev1.WindowsSecurityContextOptions{
SecurityContext: &v1.SecurityContext{
WindowsOptions: &v1.WindowsSecurityContextOptions{
GMSACredentialSpec: generateDummyCredSpecs(container2Domain),
},
},
},
},
SecurityContext: &corev1.PodSecurityContext{
WindowsOptions: &corev1.WindowsSecurityContextOptions{
SecurityContext: &v1.PodSecurityContext{
WindowsOptions: &v1.WindowsSecurityContextOptions{
GMSACredentialSpec: generateDummyCredSpecs(podDomain),
},
},


@ -24,7 +24,7 @@ import (
"github.com/davecgh/go-spew/spew"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -46,7 +46,7 @@ import (
const itDescription = "status and events should match expectations"
type expectNodeConfigStatus struct {
lastKnownGood *apiv1.NodeConfigSource
lastKnownGood *v1.NodeConfigSource
err string
// If true, expect Status.Config.Active == Status.Config.LastKnownGood,
// otherwise expect Status.Config.Active == Status.Config.Assigned.
@ -55,8 +55,8 @@ type expectNodeConfigStatus struct {
type nodeConfigTestCase struct {
desc string
configSource *apiv1.NodeConfigSource
configMap *apiv1.ConfigMap
configSource *v1.NodeConfigSource
configMap *v1.ConfigMap
expectConfigStatus expectNodeConfigStatus
expectConfig *kubeletconfig.KubeletConfiguration
// whether to expect this substring in an error returned from the API server when updating the config source
@ -71,8 +71,8 @@ type nodeConfigTestCase struct {
// This test is marked [Disruptive] because the Kubelet restarts several times during this test.
var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:DynamicKubeletConfig][Serial][Disruptive]", func() {
f := framework.NewDefaultFramework("dynamic-kubelet-configuration-test")
var beforeNode *apiv1.Node
var beforeConfigMap *apiv1.ConfigMap
var beforeNode *v1.Node
var beforeConfigMap *v1.ConfigMap
var beforeKC *kubeletconfig.KubeletConfiguration
var localKC *kubeletconfig.KubeletConfiguration
@ -145,7 +145,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
framework.ExpectNoError(err)
// fail to parse, we insert some bogus stuff into the configMap
failParseConfigMap := &apiv1.ConfigMap{
failParseConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "dynamic-kubelet-config-test-fail-parse"},
Data: map[string]string{
"kubelet": "{0xdeadbeef}",
@ -161,17 +161,17 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failValidateConfigMap)
framework.ExpectNoError(err)
correctSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
correctSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: correctConfigMap.Namespace,
Name: correctConfigMap.Name,
KubeletConfigKey: "kubelet",
}}
failParseSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
failParseSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: failParseConfigMap.Namespace,
Name: failParseConfigMap.Name,
KubeletConfigKey: "kubelet",
}}
failValidateSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
failValidateSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: failValidateConfigMap.Namespace,
Name: failValidateConfigMap.Name,
KubeletConfigKey: "kubelet",
@ -187,12 +187,12 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
},
{
desc: "Node.Spec.ConfigSource has all nil subfields",
configSource: &apiv1.NodeConfigSource{},
configSource: &v1.NodeConfigSource{},
apierr: "exactly one reference subfield must be non-nil",
},
{
desc: "Node.Spec.ConfigSource.ConfigMap is missing namespace",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Name: "bar",
KubeletConfigKey: "kubelet",
}}, // missing Namespace
@ -200,7 +200,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
},
{
desc: "Node.Spec.ConfigSource.ConfigMap is missing name",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: "foo",
KubeletConfigKey: "kubelet",
}}, // missing Name
@ -208,7 +208,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
},
{
desc: "Node.Spec.ConfigSource.ConfigMap is missing kubeletConfigKey",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: "foo",
Name: "bar",
}}, // missing KubeletConfigKey
@ -216,7 +216,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
},
{
desc: "Node.Spec.ConfigSource.ConfigMap.UID is illegally specified",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
UID: "foo",
Name: "bar",
Namespace: "baz",
@ -226,7 +226,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
},
{
desc: "Node.Spec.ConfigSource.ConfigMap.ResourceVersion is illegally specified",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Name: "bar",
Namespace: "baz",
ResourceVersion: "1",
@ -236,7 +236,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
},
{
desc: "Node.Spec.ConfigSource.ConfigMap has invalid namespace",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Name: "bar",
Namespace: "../baz",
KubeletConfigKey: "kubelet",
@ -245,7 +245,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
},
{
desc: "Node.Spec.ConfigSource.ConfigMap has invalid name",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Name: "../bar",
Namespace: "baz",
KubeletConfigKey: "kubelet",
@ -254,7 +254,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
},
{
desc: "Node.Spec.ConfigSource.ConfigMap has invalid kubeletConfigKey",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Name: "bar",
Namespace: "baz",
KubeletConfigKey: "../qux",
@ -310,7 +310,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
framework.ExpectNoError(err)
// bad config map, we insert some bogus stuff into the configMap
badConfigMap := &apiv1.ConfigMap{
badConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "dynamic-kubelet-config-test-bad"},
Data: map[string]string{
"kubelet": "{0xdeadbeef}",
@ -319,7 +319,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
badConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(badConfigMap)
framework.ExpectNoError(err)
lkgSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
lkgSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: lkgConfigMap.Namespace,
Name: lkgConfigMap.Name,
KubeletConfigKey: "kubelet",
@ -328,7 +328,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
lkgStatus.ConfigMap.UID = lkgConfigMap.UID
lkgStatus.ConfigMap.ResourceVersion = lkgConfigMap.ResourceVersion
badSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
badSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: badConfigMap.Namespace,
Name: badConfigMap.Name,
KubeletConfigKey: "kubelet",
@ -375,7 +375,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
combinedConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(combinedConfigMap)
framework.ExpectNoError(err)
lkgSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
lkgSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: combinedConfigMap.Namespace,
Name: combinedConfigMap.Name,
KubeletConfigKey: "kubelet",
@ -428,7 +428,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
lkgConfigMap1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap1)
framework.ExpectNoError(err)
lkgSource1 := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
lkgSource1 := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: lkgConfigMap1.Namespace,
Name: lkgConfigMap1.Name,
KubeletConfigKey: "kubelet",
@ -441,7 +441,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
lkgConfigMap2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap2)
framework.ExpectNoError(err)
lkgSource2 := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
lkgSource2 := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: lkgConfigMap2.Namespace,
Name: lkgConfigMap2.Name,
KubeletConfigKey: "kubelet",
@ -500,13 +500,13 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
cm2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm2)
framework.ExpectNoError(err)
cm1Source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
cm1Source := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: cm1.Namespace,
Name: cm1.Name,
KubeletConfigKey: "kubelet",
}}
cm2Source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
cm2Source := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: cm2.Namespace,
Name: cm2.Name,
KubeletConfigKey: "kubelet",
@ -563,8 +563,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
// ensure node config source is set to the config map we will mutate in-place,
// since updateConfigMapFunc doesn't mutate Node.Spec.ConfigSource
source := &apiv1.NodeConfigSource{
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
source := &v1.NodeConfigSource{
ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: correctConfigMap.Namespace,
Name: correctConfigMap.Name,
KubeletConfigKey: "kubelet",
@ -635,8 +635,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
"kubelet": "{0xdeadbeef}",
}
// ensure node config source is set to the config map we will mutate in-place
source := &apiv1.NodeConfigSource{
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
source := &v1.NodeConfigSource{
ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: lkgConfigMap.Namespace,
Name: lkgConfigMap.Name,
KubeletConfigKey: "kubelet",
@ -722,8 +722,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
// ensure node config source is set to the config map we will mutate in-place,
// since recreateConfigMapFunc doesn't mutate Node.Spec.ConfigSource
source := &apiv1.NodeConfigSource{
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
source := &v1.NodeConfigSource{
ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: correctConfigMap.Namespace,
Name: correctConfigMap.Name,
KubeletConfigKey: "kubelet",
@ -790,8 +790,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
// ensure node config source is set to the config map we will mutate in-place,
// since our mutation functions don't mutate Node.Spec.ConfigSource
source := &apiv1.NodeConfigSource{
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
source := &v1.NodeConfigSource{
ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: correctConfigMap.Namespace,
Name: correctConfigMap.Name,
KubeletConfigKey: "kubelet",
@ -984,7 +984,7 @@ func (tc *nodeConfigTestCase) checkConfigStatus(f *framework.Framework) {
}, timeout, interval).Should(BeNil())
}
func expectConfigStatus(tc *nodeConfigTestCase, actual *apiv1.NodeConfigStatus) error {
func expectConfigStatus(tc *nodeConfigTestCase, actual *v1.NodeConfigStatus) error {
var errs []string
if actual == nil {
return fmt.Errorf("expectConfigStatus requires actual to be non-nil (possible Kubelet failed to update status)")
@ -1052,7 +1052,7 @@ func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) {
return fmt.Errorf("checkEvent: case %s: %v", tc.desc, err)
}
// find config changed event with most recent timestamp
var recent *apiv1.Event
var recent *v1.Event
for i := range events.Items {
if events.Items[i].Reason == controller.KubeletConfigChangedEventReason {
if recent == nil {
@ -1110,7 +1110,7 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) {
}
}
// remote config helper
mkRemoteSample := func(name model.LabelValue, source *apiv1.NodeConfigSource) *model.Sample {
mkRemoteSample := func(name model.LabelValue, source *v1.NodeConfigSource) *model.Sample {
return &model.Sample{
Metric: model.Metric(map[model.LabelName]model.LabelValue{
model.MetricNameLabel: name,
@ -1192,6 +1192,6 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) {
}
// constructs the expected SelfLink for a config map
func configMapAPIPath(cm *apiv1.ConfigMap) string {
func configMapAPIPath(cm *v1.ConfigMap) string {
return fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", cm.Namespace, cm.Name)
}


@ -23,7 +23,7 @@ import (
"strings"
"time"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@ -40,7 +40,7 @@ import (
)
// makePodToVerifyHugePages returns a pod that verifies the specified cgroup with hugetlb
func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity) *apiv1.Pod {
func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity) *v1.Pod {
// convert the cgroup name to its literal form
cgroupFsName := ""
cgroupName := cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup, baseName)
@ -53,18 +53,18 @@ func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity)
// this command takes the expected value and compares it against the actual value for the pod cgroup hugetlb.2MB.limit_in_bytes
command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/hugetlb/%v/hugetlb.2MB.limit_in_bytes); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName)
e2elog.Logf("Pod to run command: %v", command)
pod := &apiv1.Pod{
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
},
Spec: apiv1.PodSpec{
RestartPolicy: apiv1.RestartPolicyNever,
Containers: []apiv1.Container{
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: busyboxImage,
Name: "container" + string(uuid.NewUUID()),
Command: []string{"sh", "-c", command},
VolumeMounts: []apiv1.VolumeMount{
VolumeMounts: []v1.VolumeMount{
{
Name: "sysfscgroup",
MountPath: "/tmp",
@ -72,11 +72,11 @@ func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity)
},
},
},
Volumes: []apiv1.Volume{
Volumes: []v1.Volume{
{
Name: "sysfscgroup",
VolumeSource: apiv1.VolumeSource{
HostPath: &apiv1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
},
},
},
@ -152,8 +152,8 @@ func pollResourceAsString(f *framework.Framework, resourceName string) string {
}
// amountOfResourceAsString returns the amount of resourceName advertised by a node
func amountOfResourceAsString(node *apiv1.Node, resourceName string) string {
val, ok := node.Status.Capacity[apiv1.ResourceName(resourceName)]
func amountOfResourceAsString(node *v1.Node, resourceName string) string {
val, ok := node.Status.Capacity[v1.ResourceName(resourceName)]
if !ok {
return ""
}
@ -163,21 +163,21 @@ func amountOfResourceAsString(node *apiv1.Node, resourceName string) string {
func runHugePagesTests(f *framework.Framework) {
It("should assign hugepages as expected based on the Pod spec", func() {
By("by running a G pod that requests hugepages")
pod := f.PodClient().Create(&apiv1.Pod{
pod := f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: imageutils.GetPauseImageName(),
Name: "container" + string(uuid.NewUUID()),
Resources: apiv1.ResourceRequirements{
Limits: apiv1.ResourceList{
apiv1.ResourceName("cpu"): resource.MustParse("10m"),
apiv1.ResourceName("memory"): resource.MustParse("100Mi"),
apiv1.ResourceName("hugepages-2Mi"): resource.MustParse("50Mi"),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceName("cpu"): resource.MustParse("10m"),
v1.ResourceName("memory"): resource.MustParse("100Mi"),
v1.ResourceName("hugepages-2Mi"): resource.MustParse("50Mi"),
},
},
},


@ -20,7 +20,7 @@ import (
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework"
@ -33,8 +33,8 @@ import (
)
// makeNodePerfPod returns a pod with the information provided by the workload.
func makeNodePerfPod(w workloads.NodePerfWorkload) *corev1.Pod {
return &corev1.Pod{
func makeNodePerfPod(w workloads.NodePerfWorkload) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-pod", w.Name()),
},
@ -62,7 +62,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
wl workloads.NodePerfWorkload
oldCfg *kubeletconfig.KubeletConfiguration
newCfg *kubeletconfig.KubeletConfiguration
pod *corev1.Pod
pod *v1.Pod
)
JustBeforeEach(func() {
err := wl.PreTestExec()


@ -21,7 +21,7 @@ import (
"strings"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
@ -39,19 +39,19 @@ func (w npbEPWorkload) Name() string {
return "npb-ep"
}
func (w npbEPWorkload) PodSpec() corev1.PodSpec {
var containers []corev1.Container
ctn := corev1.Container{
func (w npbEPWorkload) PodSpec() v1.PodSpec {
var containers []v1.Container
ctn := v1.Container{
Name: fmt.Sprintf("%s-ctn", w.Name()),
Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep:1.0",
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("15000m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("48Gi"),
},
Limits: corev1.ResourceList{
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
Limits: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("15000m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("48Gi"),
},
},
Command: []string{"/bin/sh"},
@ -59,8 +59,8 @@ func (w npbEPWorkload) PodSpec() corev1.PodSpec {
}
containers = append(containers, ctn)
return corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
return v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: containers,
}
}


@ -21,7 +21,7 @@ import (
"strings"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
@ -37,19 +37,19 @@ func (w npbISWorkload) Name() string {
return "npb-is"
}
func (w npbISWorkload) PodSpec() corev1.PodSpec {
var containers []corev1.Container
ctn := corev1.Container{
func (w npbISWorkload) PodSpec() v1.PodSpec {
var containers []v1.Container
ctn := v1.Container{
Name: fmt.Sprintf("%s-ctn", w.Name()),
Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-is:1.0",
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("16000m"),
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("16000m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("48Gi"),
},
Limits: corev1.ResourceList{
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("16000m"),
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
Limits: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("16000m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("48Gi"),
},
},
Command: []string{"/bin/sh"},
@@ -57,8 +57,8 @@ func (w npbISWorkload) PodSpec() corev1.PodSpec {
}
containers = append(containers, ctn)
return corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
return v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: containers,
}
}

View File

@@ -21,7 +21,7 @@ import (
"strings"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
@@ -39,19 +39,19 @@ func (w tfWideDeepWorkload) Name() string {
return "tensorflow-wide-deep"
}
func (w tfWideDeepWorkload) PodSpec() corev1.PodSpec {
var containers []corev1.Container
ctn := corev1.Container{
func (w tfWideDeepWorkload) PodSpec() v1.PodSpec {
var containers []v1.Container
ctn := v1.Container{
Name: fmt.Sprintf("%s-ctn", w.Name()),
Image: "gcr.io/kubernetes-e2e-test-images/node-perf/tf-wide-deep-amd64:1.0",
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("16Gi"),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("15000m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("16Gi"),
},
Limits: corev1.ResourceList{
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("16Gi"),
Limits: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("15000m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("16Gi"),
},
},
Command: []string{"/bin/sh"},
@@ -59,8 +59,8 @@ func (w tfWideDeepWorkload) PodSpec() corev1.PodSpec {
}
containers = append(containers, ctn)
return corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
return v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: containers,
}
}

View File

@@ -19,7 +19,7 @@ package workloads
import (
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
@@ -29,7 +29,7 @@ type NodePerfWorkload interface {
// Name of the workload.
Name() string
// PodSpec used to run this workload.
PodSpec() corev1.PodSpec
PodSpec() v1.PodSpec
// Timeout provides the expected time to completion
// for this workload.
Timeout() time.Duration
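For orientation, a toy implementation of the methods visible in this hunk (the real interface has more hooks; PreTestExec, for example, is called in an earlier file):

// sleepWorkload is a minimal NodePerfWorkload sketch under the new alias.
type sleepWorkload struct{}

func (sleepWorkload) Name() string { return "sleep" }

func (sleepWorkload) PodSpec() v1.PodSpec {
	return v1.PodSpec{
		RestartPolicy: v1.RestartPolicyNever,
		Containers: []v1.Container{{
			Name:    "sleep-ctn",
			Image:   "busybox",
			Command: []string{"/bin/sh", "-c", "sleep 10"},
		}},
	}
}

func (sleepWorkload) Timeout() time.Duration { return time.Minute }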

View File

@@ -20,7 +20,7 @@ import (
"fmt"
"time"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -37,7 +37,7 @@ import (
)
// makePodToVerifyPids returns a pod that verifies specified cgroup with pids
func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *apiv1.Pod {
func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *v1.Pod {
// convert the cgroup name to its literal form
cgroupFsName := ""
cgroupName := cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup, baseName)
@@ -50,18 +50,18 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *apiv1.Po
// this command takes the expected value and compares it against the actual value for the pod cgroup pids.max
command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/pids/%v/pids.max); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", pidsLimit.Value(), cgroupFsName)
e2elog.Logf("Pod to run command: %v", command)
pod := &apiv1.Pod{
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
},
Spec: apiv1.PodSpec{
RestartPolicy: apiv1.RestartPolicyNever,
Containers: []apiv1.Container{
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: busyboxImage,
Name: "container" + string(uuid.NewUUID()),
Command: []string{"sh", "-c", command},
VolumeMounts: []apiv1.VolumeMount{
VolumeMounts: []v1.VolumeMount{
{
Name: "sysfscgroup",
MountPath: "/tmp",
@@ -69,11 +69,11 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *apiv1.Po
},
},
},
Volumes: []apiv1.Volume{
Volumes: []v1.Volume{
{
Name: "sysfscgroup",
VolumeSource: apiv1.VolumeSource{
HostPath: &apiv1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
},
},
},
@@ -107,20 +107,20 @@ func enablePodPidsLimitInKubelet(f *framework.Framework) *kubeletconfig.KubeletC
func runPodPidsLimitTests(f *framework.Framework) {
It("should set pids.max for Pod", func() {
By("by creating a G pod")
pod := f.PodClient().Create(&apiv1.Pod{
pod := f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: imageutils.GetPauseImageName(),
Name: "container" + string(uuid.NewUUID()),
Resources: apiv1.ResourceRequirements{
Limits: apiv1.ResourceList{
apiv1.ResourceName("cpu"): resource.MustParse("10m"),
apiv1.ResourceName("memory"): resource.MustParse("100Mi"),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceName("cpu"): resource.MustParse("10m"),
v1.ResourceName("memory"): resource.MustParse("100Mi"),
},
},
},
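Driving the verifier pod from a test could look like the sketch below; the wrapper name, the quantity construction, and the choice of wait helper are assumptions based on framework conventions, not lines from this commit.

// verifyPodPidsLimit (hypothetical) creates the checker pod and waits for it
// to exit 0, which per the command above means pids.max matched the limit.
func verifyPodPidsLimit(f *framework.Framework, baseName string, limit int64) {
	pod := makePodToVerifyPids(baseName, *resource.NewQuantity(limit, resource.DecimalSI))
	f.PodClient().Create(pod)
	err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
	framework.ExpectNoError(err)
}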

View File

@@ -30,7 +30,7 @@ import (
"golang.org/x/net/context"
"k8s.io/klog"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
@@ -200,8 +200,8 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
}
// create the reference and set Node.Spec.ConfigSource
src := &apiv1.NodeConfigSource{
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
src := &v1.NodeConfigSource{
ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: "kube-system",
Name: cm.Name,
KubeletConfigKey: "kubelet",
@@ -233,7 +233,7 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
}
// sets the current node's configSource, this should only be called from Serial tests
func setNodeConfigSource(f *framework.Framework, source *apiv1.NodeConfigSource) error {
func setNodeConfigSource(f *framework.Framework, source *v1.NodeConfigSource) error {
// since this is a serial test, we just get the node, change the source, and then update it
// this prevents any issues with the patch API from affecting the test results
nodeclient := f.ClientSet.CoreV1().Nodes()
@@ -310,7 +310,7 @@ func decodeConfigz(resp *http.Response) (*kubeletconfig.KubeletConfiguration, er
}
// creates a configmap containing kubeCfg in kube-system namespace
func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*apiv1.ConfigMap, error) {
func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*v1.ConfigMap, error) {
cmap := newKubeletConfigMap("testcfg", internalKC)
cmap, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cmap)
if err != nil {
@@ -320,11 +320,11 @@ func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletCo
}
// constructs a ConfigMap, populating one of its keys with the KubeletConfiguration. Always uses GenerateName to generate a suffix.
func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfiguration) *apiv1.ConfigMap {
func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfiguration) *v1.ConfigMap {
data, err := kubeletconfigcodec.EncodeKubeletConfig(internalKC, kubeletconfigv1beta1.SchemeGroupVersion)
framework.ExpectNoError(err)
cmap := &apiv1.ConfigMap{
cmap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{GenerateName: name + "-"},
Data: map[string]string{
"kubelet": string(data),
@@ -345,7 +345,7 @@ func logNodeEvents(f *framework.Framework) {
framework.ExpectNoError(err)
}
func getLocalNode(f *framework.Framework) *apiv1.Node {
func getLocalNode(f *framework.Framework) *v1.Node {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(Equal(1), "Unexpected number of node objects for node e2e. Expects only one node.")
return &nodeList.Items[0]
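Taken together, the helpers in this file support one flow: encode the KubeletConfiguration into a ConfigMap, then point the node at it through a NodeConfigSource. A condensed sketch (the wrapper function is hypothetical; the field values match the hunk above):

// applyTestConfig (hypothetical) chains createConfigMap and setNodeConfigSource.
func applyTestConfig(f *framework.Framework, kc *kubeletconfig.KubeletConfiguration) error {
	cm, err := createConfigMap(f, kc)
	if err != nil {
		return err
	}
	src := &v1.NodeConfigSource{
		ConfigMap: &v1.ConfigMapNodeConfigSource{
			Namespace:        "kube-system",
			Name:             cm.Name,
			KubeletConfigKey: "kubelet",
		},
	}
	return setNodeConfigSource(f, src)
}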