improve code

parent b188868fd9
commit f6ea2a61da
@@ -524,7 +524,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
 	return controller
 }
 
-// createBatchPodSequential creats pods back-to-back in sequence.
+// createBatchPodSequential creates pods back-to-back in sequence.
 func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod) (time.Duration, []framework.PodLatencyData) {
 	batchStartTime := metav1.Now()
 	e2eLags := make([]framework.PodLatencyData, 0)
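For context on the function whose comment was fixed: createBatchPodSequential creates each pod one at a time, timing every creation end to end as well as the batch as a whole. A rough sketch of that measurement loop, with a hypothetical createAndWait callback standing in for the framework's pod-creation and readiness helpers:

package main

import (
	"fmt"
	"time"
)

// podLatency is a hypothetical stand-in for framework.PodLatencyData.
type podLatency struct {
	Name    string
	Latency time.Duration
}

// createBatchSequential mirrors the shape of createBatchPodSequential:
// create pods back-to-back, record each pod's e2e latency, and return
// the total batch lag alongside the per-pod numbers.
func createBatchSequential(pods []string, createAndWait func(string) error) (time.Duration, []podLatency, error) {
	batchStart := time.Now()
	lags := make([]podLatency, 0, len(pods))
	for _, name := range pods {
		start := time.Now()
		if err := createAndWait(name); err != nil {
			return 0, nil, err
		}
		lags = append(lags, podLatency{Name: name, Latency: time.Since(start)})
	}
	return time.Since(batchStart), lags, nil
}

func main() {
	batchLag, lags, _ := createBatchSequential([]string{"pod-0", "pod-1"},
		func(name string) error {
			time.Sleep(10 * time.Millisecond) // simulate pod startup
			return nil
		})
	fmt.Println("batch lag:", batchLag, "per-pod:", lags)
}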
@@ -570,7 +570,7 @@ func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLaten
 	logPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testInfo), "throughput")
 }
 
-// increaseKubeletAPIQPSLimit sets Kubelet API QPS via ConfigMap. Kubelet will restart with the new QPS.
+// setKubeletAPIQPSLimit sets Kubelet API QPS via ConfigMap. Kubelet will restart with the new QPS.
 func setKubeletAPIQPSLimit(f *framework.Framework, newAPIQPS int32) {
 	const restartGap = 40 * time.Second
 
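The renamed function pushes a new QPS value to the Kubelet through a ConfigMap and then waits out a fixed 40-second gap for the Kubelet to restart with it. A rough sketch of that update-then-wait shape, assuming a hypothetical push callback in place of the framework's ConfigMap plumbing:

package main

import (
	"fmt"
	"time"
)

// kubeletConfig is a hypothetical stand-in for the slice of the Kubelet
// configuration the test mutates; the real test round-trips the full
// KubeletConfiguration through a ConfigMap.
type kubeletConfig struct {
	KubeAPIQPS int32
}

// setAPIQPSLimit sketches the shape of setKubeletAPIQPSLimit: bump the
// QPS field, push the config, then wait a fixed gap so the Kubelet can
// restart with the new value before the benchmark resumes.
func setAPIQPSLimit(cfg kubeletConfig, newAPIQPS int32, push func(kubeletConfig) error, restartGap time.Duration) error {
	cfg.KubeAPIQPS = newAPIQPS
	if err := push(cfg); err != nil {
		return fmt.Errorf("updating kubelet config: %w", err)
	}
	time.Sleep(restartGap) // the test uses restartGap = 40 * time.Second
	return nil
}

func main() {
	push := func(c kubeletConfig) error {
		fmt.Println("pushed config, QPS =", c.KubeAPIQPS)
		return nil
	}
	// A short gap for the demo; the real test waits 40 seconds.
	_ = setAPIQPSLimit(kubeletConfig{KubeAPIQPS: 5}, 60, push, 100*time.Millisecond)
}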
@@ -87,7 +87,7 @@ func isDockerLiveRestoreEnabled() (bool, error) {
 	return info.LiveRestoreEnabled, nil
 }
 
-// stopDockerDaemon starts the Docker daemon.
+// startDockerDaemon starts the Docker daemon.
 func startDockerDaemon() error {
 	switch {
 	case systemdutil.IsRunningSystemd():
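startDockerDaemon branches on the init system, as the switch above shows. A plausible self-contained sketch of that shape: systemd is detected by probing /run/systemd/system (which is what systemdutil.IsRunningSystemd does), while the service-start commands below are illustrative assumptions, not copied from the test:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// isRunningSystemd mimics systemdutil.IsRunningSystemd: systemd creates
// /run/systemd/system early in boot, so its presence is the usual probe.
func isRunningSystemd() bool {
	fi, err := os.Lstat("/run/systemd/system")
	return err == nil && fi.IsDir()
}

// startDockerDaemon sketches the switch in the hunk: pick the init
// system's own mechanism to start the docker service.
func startDockerDaemon() error {
	switch {
	case isRunningSystemd():
		return exec.Command("systemctl", "start", "docker").Run()
	default:
		return exec.Command("service", "docker", "start").Run()
	}
}

func main() {
	fmt.Println("systemd detected:", isRunningSystemd())
	if err := startDockerDaemon(); err != nil {
		fmt.Println("failed to start docker:", err)
	}
}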
@@ -92,8 +92,11 @@ func (rp *remotePuller) Name() string {
 }
 
 func (rp *remotePuller) Pull(image string) ([]byte, error) {
-	// TODO(runcom): should we check if the image is already pulled with ImageStatus?
-	_, err := rp.imageService.PullImage(&runtimeapi.ImageSpec{Image: image}, nil)
+	imageStatus, err := rp.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image})
+	if err == nil && imageStatus != nil {
+		return nil, nil
+	}
+	_, err = rp.imageService.PullImage(&runtimeapi.ImageSpec{Image: image}, nil)
 	return nil, err
 }
 
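This is the substantive change of the commit: Pull now answers the old TODO by asking the image service for the image's status first, and skips the network pull when the image is already present. A minimal sketch of the same check-then-pull pattern, with a stub standing in for the CRI image service (the imageService interface and fakeImageService type here are hypothetical, for illustration only):

package main

import "fmt"

// imageInfo and imageService are hypothetical stand-ins for the CRI
// ImageStatus/PullImage calls used in the hunk above.
type imageInfo struct{ ID string }

type imageService interface {
	ImageStatus(image string) (*imageInfo, error)
	PullImage(image string) error
}

// pull mirrors the new logic: a nil error plus a non-nil status means
// the image is already local, so the expensive pull is skipped.
func pull(svc imageService, image string) error {
	if status, err := svc.ImageStatus(image); err == nil && status != nil {
		return nil // already pulled, nothing to do
	}
	return svc.PullImage(image)
}

// fakeImageService pretends some images are already cached locally.
type fakeImageService struct{ local map[string]bool }

func (f *fakeImageService) ImageStatus(image string) (*imageInfo, error) {
	if f.local[image] {
		return &imageInfo{ID: image}, nil
	}
	return nil, nil // not found is a nil status, not an error
}

func (f *fakeImageService) PullImage(image string) error {
	fmt.Println("pulling", image)
	f.local[image] = true
	return nil
}

func main() {
	svc := &fakeImageService{local: map[string]bool{"busybox": true}}
	_ = pull(svc, "busybox") // prints nothing: already local
	_ = pull(svc, "alpine")  // prints "pulling alpine"
}

One subtlety the hunk preserves: an error from the status lookup does not abort the operation; the code falls through and lets PullImage report the authoritative error.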
@@ -534,7 +534,7 @@ func getContainer(pid int) (string, error) {
 		return "", cgroups.NewNotFoundError("memory")
 	}
 
-	// since we use this container for accounting, we need to ensure its a unified hierarchy.
+	// since we use this container for accounting, we need to ensure it is a unified hierarchy.
 	if cpu != memory {
 		return "", fmt.Errorf("cpu and memory cgroup hierarchy not unified. cpu: %s, memory: %s", cpu, memory)
 	}
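For background on the check whose comment was fixed: cpu and memory here are hierarchy paths read from /proc/<pid>/cgroup, and using one container for both kinds of accounting only works if the two controllers share a path. A dependency-free sketch of that parse-and-compare, assuming cgroup v1 (the surrounding code uses libcontainer's cgroups package rather than parsing by hand):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// parseCgroupFile reads /proc/<pid>/cgroup lines of the form
// "hierarchy-ID:controller-list:path" and maps controller -> path.
// (On cgroup v2 the controller list is empty; this sketch assumes v1.)
func parseCgroupFile(path string) (map[string]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	paths := make(map[string]string)
	s := bufio.NewScanner(f)
	for s.Scan() {
		parts := strings.SplitN(s.Text(), ":", 3)
		if len(parts) != 3 {
			continue
		}
		for _, ctrl := range strings.Split(parts[1], ",") {
			paths[ctrl] = parts[2]
		}
	}
	return paths, s.Err()
}

func main() {
	cg, err := parseCgroupFile("/proc/self/cgroup")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	cpu, memory := cg["cpu"], cg["memory"]
	// Mirrors the hunk: accounting needs one unified hierarchy path.
	if cpu != memory {
		fmt.Printf("cpu and memory cgroup hierarchy not unified. cpu: %s, memory: %s\n", cpu, memory)
		return
	}
	fmt.Println("unified hierarchy:", cpu)
}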