refactor: generated
@@ -25,7 +25,7 @@ import (
 	"sync"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api/unversioned"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
@@ -319,7 +319,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	)
 	var (
 		mutex      = &sync.Mutex{}
-		watchTimes = make(map[string]unversioned.Time, 0)
+		watchTimes = make(map[string]metav1.Time, 0)
 		stopCh     = make(chan struct{})
 	)
 
@@ -358,8 +358,8 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 
 	// Analyze results
 	var (
-		firstCreate unversioned.Time
-		lastRunning unversioned.Time
+		firstCreate metav1.Time
+		lastRunning metav1.Time
 		init        = true
 		e2eLags     = make([]framework.PodLatencyData, 0)
 	)
@@ -429,10 +429,10 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 
 // createBatchPodWithRateControl creates a batch of pods concurrently, uses one goroutine for each creation.
 // between creations there is an interval for throughput control
-func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]unversioned.Time {
-	createTimes := make(map[string]unversioned.Time)
+func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
+	createTimes := make(map[string]metav1.Time)
 	for _, pod := range pods {
-		createTimes[pod.ObjectMeta.Name] = unversioned.Now()
+		createTimes[pod.ObjectMeta.Name] = metav1.Now()
 		go f.PodClient().Create(pod)
 		time.Sleep(interval)
 	}
@@ -476,7 +476,7 @@ func verifyPodStartupLatency(expect, actual framework.LatencyMetric) error {
 }
 
 // newInformerWatchPod creates an informer to check whether all pods are running.
-func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]unversioned.Time,
+func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time,
 	podType string) *cache.Controller {
 	ns := f.Namespace.Name
 	checkPodRunning := func(p *v1.Pod) {
@@ -486,7 +486,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
 
 		if p.Status.Phase == v1.PodRunning {
 			if _, found := watchTimes[p.Name]; !found {
-				watchTimes[p.Name] = unversioned.Now()
+				watchTimes[p.Name] = metav1.Now()
 			}
 		}
 	}
@@ -523,15 +523,15 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
 
 // createBatchPodSequential creats pods back-to-back in sequence.
 func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod) (time.Duration, []framework.PodLatencyData) {
-	batchStartTime := unversioned.Now()
+	batchStartTime := metav1.Now()
 	e2eLags := make([]framework.PodLatencyData, 0)
 	for _, pod := range pods {
-		create := unversioned.Now()
+		create := metav1.Now()
 		f.PodClient().CreateSync(pod)
 		e2eLags = append(e2eLags,
-			framework.PodLatencyData{Name: pod.Name, Latency: unversioned.Now().Time.Sub(create.Time)})
+			framework.PodLatencyData{Name: pod.Name, Latency: metav1.Now().Time.Sub(create.Time)})
 	}
-	batchLag := unversioned.Now().Time.Sub(batchStartTime.Time)
+	batchLag := metav1.Now().Time.Sub(batchStartTime.Time)
 	sort.Sort(framework.LatencySlice(e2eLags))
 	return batchLag, e2eLags
 }
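
The whole commit is a mechanical rename: every unversioned.Time value and unversioned.Now() call becomes metav1.Time / metav1.Now(), with the import swapped to match. Below is a minimal standalone sketch of the timing pattern the test relies on; the metav1 import path is the one from the hunk above, while the main scaffolding and the 50ms sleep are illustrative, not part of the commit.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)

func main() {
	// metav1.Time embeds time.Time and adds API-friendly serialization;
	// metav1.Now() stamps the current wall-clock time, replacing
	// unversioned.Now() one-for-one.
	start := metav1.Now()

	time.Sleep(50 * time.Millisecond) // stand-in for pod startup work

	// Durations are computed on the embedded time.Time, exactly as the
	// test's e2eLags and batchLag calculations do above.
	lag := metav1.Now().Time.Sub(start.Time)
	fmt.Printf("startup lag: %v\n", lag)
}

Because metav1.Time simply embeds time.Time, the rename changes serialization behavior for API objects but leaves all of the test's arithmetic (Sub, comparisons) untouched, which is why the refactor could be generated.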
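The rate-control idiom in createBatchPodWithRateControl is also worth a note: one goroutine per creation, with a fixed sleep between launches to cap request throughput. A generic sketch of that idiom using only the standard library follows; every name in it is illustrative, standing in for the framework types in the diff.

package main

import (
	"fmt"
	"time"
)

// launchWithRateControl starts one goroutine per item and sleeps for
// interval between launches, bounding the rate at which work begins,
// mirroring the go f.PodClient().Create(pod) / time.Sleep(interval) loop.
func launchWithRateControl(items []string, interval time.Duration, create func(string)) map[string]time.Time {
	startTimes := make(map[string]time.Time)
	for _, item := range items {
		startTimes[item] = time.Now() // record launch time, as createTimes does
		go create(item)               // fire-and-forget creation
		time.Sleep(interval)          // throughput control between creations
	}
	return startTimes
}

func main() {
	times := launchWithRateControl([]string{"pod-0", "pod-1"}, 100*time.Millisecond, func(name string) {
		fmt.Println("creating", name)
	})
	fmt.Println(len(times), "creations launched")
	time.Sleep(200 * time.Millisecond) // let the goroutines finish before exit
}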