Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-26 05:03:09 +00:00)

Correct spelling mistakes

Signed-off-by: yuxiaobo <yuxiaobogo@163.com>

parent: 74cbf0dc33
commit: 81e9f21f83

@@ -306,7 +306,7 @@ function kube::build::has_ip() {
 # Detect if a specific image exists
 #
 # $1 - image repo name
-# #2 - image tag
+# $2 - image tag
 function kube::build::docker_image_exists() {
 [[ -n $1 && -n $2 ]] || {
 kube::log::error "Internal error. Image not specified in docker_image_exists."
@@ -52,7 +52,7 @@ func doRoundTrip(t *testing.T, internalVersion schema.GroupVersion, externalVers
 fuzzer.FuzzerFor(FuzzerFuncs, rand.NewSource(seed), legacyscheme.Codecs).
 // We are explicitly overwriting custom fuzzing functions, to ensure
 // that InitContainers and their statuses are not generated. This is
-// because in thise test we are simply doing json operations, in which
+// because in this test we are simply doing json operations, in which
 // those disappear.
 Funcs(
 func(s *api.PodSpec, c fuzz.Continue) {
@@ -47,7 +47,7 @@ type PermissiveSigningPolicy struct {
 // TTL is the certificate TTL. It's used to calculate the NotAfter value of
 // the certificate.
 TTL time.Duration
-// Usages are the allowed usages of a certficate.
+// Usages are the allowed usages of a certificate.
 Usages []capi.KeyUsage
 }

@@ -893,7 +893,7 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, nodeList []*v1.Node,
 }

 // syncNodes deletes given pods and creates new daemon set pods on the given nodes
-// returns slice with erros if any
+// returns slice with errors if any
 func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
 // We need to set expectations before creating/deleting pods to avoid race conditions.
 dsKey, err := controller.KeyFunc(ds)
@@ -23,7 +23,7 @@ import (

 "k8s.io/klog"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/types"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -61,7 +61,7 @@ type rangeAllocator struct {
 // This increases a throughput of CIDR assignment by not blocking on long operations.
 nodeCIDRUpdateChannel chan nodeReservedCIDRs
 recorder record.EventRecorder
-// Keep a set of nodes that are currectly being processed to avoid races in CIDR allocation
+// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
 lock sync.Mutex
 nodesInProcessing sets.String
 }
@@ -42,7 +42,7 @@ import (

 const (
 // TODO (k82cn): Figure out a reasonable number of workers/channels and propagate
-// the number of workers up making it a paramater of Run() function.
+// the number of workers up making it a parameter of Run() function.

 // NodeUpdateChannelSize defines the size of channel for node update events.
 NodeUpdateChannelSize = 10
@@ -222,7 +222,7 @@ func TestCreatePod(t *testing.T) {
 }
 }
 if podDeleted != item.expectDelete {
-t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
+t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
 }
 close(stopCh)
 }
@@ -319,7 +319,7 @@ func TestUpdatePod(t *testing.T) {
 }
 }
 if podDeleted != item.expectDelete {
-t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
+t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
 }
 close(stopCh)
 }
@@ -375,7 +375,7 @@ func TestCreateNode(t *testing.T) {
 }
 }
 if podDeleted != item.expectDelete {
-t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
+t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
 }
 close(stopCh)
 }
@@ -499,7 +499,7 @@ func TestUpdateNode(t *testing.T) {
 }
 }
 if podDeleted != item.expectDelete {
-t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
+t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
 }
 close(stopCh)
 }
@@ -348,7 +348,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil {
 return true, nil, err
 }
-// TODO: it's ok not to sort these becaues statusOk
+// TODO: it's ok not to sort these because statusOk
 // contains all the conditions, so we'll never be appending.
 // Default to statusOk when missing any specific conditions
 if tc.expectedConditions == nil {
@@ -1081,7 +1081,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 manager.syncReplicaSet(GetKey(rs, t))

 if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
-t.Errorf("Found expectaions, expected none since the ReplicaSet has been deleted.")
+t.Errorf("Found expectations, expected none since the ReplicaSet has been deleted.")
 }

 // This should have no effect, since we've deleted the ReplicaSet.
@@ -81,7 +81,7 @@ const (

 // serviceLoadBalancerFinalizerFeature is the feature gate name that
 // enables Finalizer Protection for Service LoadBalancers.
-// orginated from: https://github.com/kubernetes/kubernetes/blob/28e800245e/pkg/features/kube_features.go#L433
+// originated from: https://github.com/kubernetes/kubernetes/blob/28e800245e/pkg/features/kube_features.go#L433
 serviceLoadBalancerFinalizerFeature = "ServiceLoadBalancerFinalizer"

 // legacyNodeRoleBehaviro is the feature gate name that enables legacy
@@ -57,7 +57,7 @@ func TestIsMemberOf(t *testing.T) {
 set2.Name = "foo2"
 pod := newStatefulSetPod(set, 1)
 if !isMemberOf(set, pod) {
-t.Error("isMemberOf retruned false negative")
+t.Error("isMemberOf returned false negative")
 }
 if isMemberOf(set2, pod) {
 t.Error("isMemberOf returned false positive")
@@ -90,7 +90,7 @@ func TestStorageMatches(t *testing.T) {
 set := newStatefulSet(3)
 pod := newStatefulSetPod(set, 1)
 if !storageMatches(set, pod) {
-t.Error("Newly created Pod has a invalid stroage")
+t.Error("Newly created Pod has a invalid storage")
 }
 pod.Spec.Volumes = nil
 if storageMatches(set, pod) {
@@ -144,7 +144,7 @@ func TestUpdateStorage(t *testing.T) {
 set := newStatefulSet(3)
 pod := newStatefulSetPod(set, 1)
 if !storageMatches(set, pod) {
-t.Error("Newly created Pod has a invalid stroage")
+t.Error("Newly created Pod has a invalid storage")
 }
 pod.Spec.Volumes = nil
 if storageMatches(set, pod) {
@@ -56,7 +56,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.
 continue
 }

-// Pod will be modified, so making copy is requiered.
+// Pod will be modified, so making copy is required.
 pod := pods[i].DeepCopy()
 // Set reason and message in the pod object.
 if _, err := SetPodTerminationReason(kubeClient, pod, nodeName); err != nil {
@@ -122,7 +122,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s
 continue
 }

-// Pod will be modified, so making copy is requiered.
+// Pod will be modified, so making copy is required.
 pod := pods[i].DeepCopy()
 for _, cond := range pod.Status.Conditions {
 if cond.Type == v1.PodReady {
@@ -219,7 +219,7 @@ func (rc *reconciler) reconcile() {
 continue
 }

-// Trigger detach volume which requires verifing safe to detach step
+// Trigger detach volume which requires verifying safe to detach step
 // If timeout is true, skip verifySafeToDetach check
 klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
 verifySafeToDetach := !timeout
@@ -486,7 +486,7 @@ var (

 // wrapTestWithPluginCalls returns a testCall that:
 // - configures controller with a volume plugin that implements recycler,
-// deleter and provisioner. The plugin retunrs provided errors when a volume
+// deleter and provisioner. The plugin returns provided errors when a volume
 // is deleted, recycled or provisioned.
 // - calls given testCall
 func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error, expectedProvisionCalls []provisionCall, toWrap testCall) testCall {
@@ -1093,7 +1093,7 @@ func TestVolumeModeCheck(t *testing.T) {
 pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
 enableBlock: true,
 },
-"feature enabled - pvc nil and pv filesytem": {
+"feature enabled - pvc nil and pv filesystem": {
 isExpectedMismatch: false,
 vol: createVolumeModeFilesystemTestVolume(),
 pvc: makeVolumeModePVC("8G", nil, nil),
@@ -98,7 +98,7 @@ var (
 volumeOperationErrorsMetric = metrics.NewCounterVec(
 &metrics.CounterOpts{
 Name: "volume_operation_total_errors",
-Help: "Total volume operation erros",
+Help: "Total volume operation errors",
 StabilityLevel: metrics.ALPHA,
 },
 []string{"plugin_name", "operation_name"})
@@ -206,7 +206,7 @@ type PersistentVolumeController struct {

 // operationTimestamps caches start timestamp of operations
 // (currently provision + binding/deletion) for metric recording.
-// Detailed lifecyle/key for each operation
+// Detailed lifecycle/key for each operation
 // 1. provision + binding
 // key: claimKey
 // start time: user has NOT provide any volume ref in the claim AND
@@ -1071,7 +1071,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis
 }

 // Verify the claim is in cache: if so, then it is a different PVC with the same name
-// since the volume is known to be released at this moment. Ths new (cached) PVC must use
+// since the volume is known to be released at this moment. The new (cached) PVC must use
 // a different PV -- we checked that the PV is unused in isVolumeReleased.
 // So the old PV is safe to be recycled.
 claimName := claimrefToClaimKey(volume.Spec.ClaimRef)
@@ -356,7 +356,7 @@ func requestNodeCertificate(client certificatesv1beta1.CertificateSigningRequest
 }

 // This digest should include all the relevant pieces of the CSR we care about.
-// We can't direcly hash the serialized CSR because of random padding that we
+// We can't directly hash the serialized CSR because of random padding that we
 // regenerate every loop and we include usages which are not contained in the
 // CSR. This needs to be kept up to date as we add new fields to the node
 // certificates and with ensureCompatible.
@@ -85,7 +85,7 @@ func TestNodeAddressesUsesLastSuccess(t *testing.T) {
 cloud := &fake.Cloud{}
 manager := NewSyncManager(cloud, "defaultNode", 0).(*cloudResourceSyncManager)

-// These tests are stateful and order dependant.
+// These tests are stateful and order dependent.
 tests := []struct {
 name string
 addrs []v1.NodeAddress
@@ -297,7 +297,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
 return nil, err
 }

-klog.Infof("[topologymanager] Initilizing Topology Manager with %s policy", nodeConfig.ExperimentalTopologyManagerPolicy)
+klog.Infof("[topologymanager] Initializing Topology Manager with %s policy", nodeConfig.ExperimentalTopologyManagerPolicy)
 } else {
 cm.topologyManager = topologymanager.NewFakeManager()
 }
@@ -20,7 +20,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 )

-// ContainerCPUAssignments type used in cpu manger state
+// ContainerCPUAssignments type used in cpu manager state
 type ContainerCPUAssignments map[string]cpuset.CPUSet

 // Clone returns a copy of ContainerCPUAssignments
@@ -465,11 +465,11 @@ func TestClearStateStateFile(t *testing.T) {

 state.ClearState()
 if !cpuset.NewCPUSet().Equals(state.GetDefaultCPUSet()) {
-t.Error("cleared state shoudn't has got information about available cpuset")
+t.Error("cleared state shouldn't has got information about available cpuset")
 }
 for containerName := range testCase.containers {
 if !cpuset.NewCPUSet().Equals(state.GetCPUSetOrDefault(containerName)) {
-t.Error("cleared state shoudn't has got information about containers")
+t.Error("cleared state shouldn't has got information about containers")
 }
 }
 })
@@ -54,6 +54,6 @@ func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) {
 return "", fmt.Errorf("unsupported platform")
 }

-// applyExperimentalCreateConfig applys experimental configures from sandbox annotations.
+// applyExperimentalCreateConfig applies experimental configures from sandbox annotations.
 func applyExperimentalCreateConfig(createConfig *dockertypes.ContainerCreateConfig, annotations map[string]string) {
 }
@@ -293,7 +293,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act

 // determine the set of thresholds we need to drive eviction behavior (i.e. all grace periods are met)
 thresholds = thresholdsMetGracePeriod(thresholdsFirstObservedAt, now)
-debugLogThresholdsWithObservation("thresholds - grace periods satisified", thresholds, observations)
+debugLogThresholdsWithObservation("thresholds - grace periods satisfied", thresholds, observations)

 // update internal state
 m.Lock()
@@ -927,7 +927,7 @@ func TestNodeReclaimFuncs(t *testing.T) {

 // no image gc should have occurred
 if diskGC.imageGCInvoked || diskGC.containerGCInvoked {
-t.Errorf("Manager chose to perform image gc when it was not neeed")
+t.Errorf("Manager chose to perform image gc when it was not needed")
 }

 // no pod should have been killed
@@ -950,7 +950,7 @@ func TestNodeReclaimFuncs(t *testing.T) {

 // no image gc should have occurred
 if diskGC.imageGCInvoked || diskGC.containerGCInvoked {
-t.Errorf("Manager chose to perform image gc when it was not neeed")
+t.Errorf("Manager chose to perform image gc when it was not needed")
 }

 // no pod should have been killed
@@ -329,7 +329,7 @@ func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity {
 }

 // localVolumeNames returns the set of volumes for the pod that are local
-// TODO: sumamry API should report what volumes consume local storage rather than hard-code here.
+// TODO: summary API should report what volumes consume local storage rather than hard-code here.
 func localVolumeNames(pod *v1.Pod) []string {
 result := []string{}
 for _, volume := range pod.Spec.Volumes {
@@ -133,7 +133,7 @@ func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) {
 }

 // wait waits up to notifierRefreshInterval for an event on the Epoll FD for the
-// eventfd we are concerned about. It returns an error if one occurrs, and true
+// eventfd we are concerned about. It returns an error if one occurs, and true
 // if the consumer should read from the eventfd.
 func wait(epfd, eventfd int, timeout time.Duration) (bool, error) {
 events := make([]unix.EpollEvent, numFdEvents+1)
@@ -899,7 +899,7 @@ func (kl *Kubelet) IsPodDeleted(uid types.UID) bool {
 // been reclaimed by the kubelet. Reclaiming resources is a prerequisite to deleting a pod from the API server.
 func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool {
 if !notRunning(status.ContainerStatuses) {
-// We shouldnt delete pods that still have running containers
+// We shouldn't delete pods that still have running containers
 klog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
 return false
 }
@@ -918,7 +918,7 @@ func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bo
 return false
 }
 if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes {
-// We shouldnt delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes
+// We shouldn't delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes
 klog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod))
 return false
 }
@@ -327,7 +327,7 @@ func TestLifeCycleHook(t *testing.T) {
 // Now try to create a container, which should in turn invoke PostStart Hook
 _, err := m.startContainer(fakeSandBox.Id, fakeSandBoxConfig, testContainer, testPod, fakePodStatus, nil, "")
 if err != nil {
-t.Errorf("startContainer erro =%v", err)
+t.Errorf("startContainer error =%v", err)
 }
 if fakeRunner.Cmd[0] != cmdPostStart.PostStart.Exec.Command[0] {
 t.Errorf("CMD PostStart hook was not invoked")
@@ -719,7 +719,7 @@ func TestComputePodActions(t *testing.T) {
 _, _, m, err := createTestRuntimeManager()
 require.NoError(t, err)

-// Createing a pair reference pod and status for the test cases to refer
+// Creating a pair reference pod and status for the test cases to refer
 // the specific fields.
 basePod, baseStatus := makeBasePodAndStatus()
 noAction := podActions{
@@ -295,7 +295,7 @@ func isInUse(l string, logs []string) bool {
 if strings.HasSuffix(l, tmpSuffix) {
 return false
 }
-// All compresed logs are in use.
+// All compressed logs are in use.
 if strings.HasSuffix(l, compressSuffix) {
 return true
 }
@@ -63,7 +63,7 @@ func NewOperationExecutor(
 }

 // ActualStateOfWorldUpdater defines a set of operations updating the actual
-// state of the world cache after successful registeration/deregistration.
+// state of the world cache after successful registration/deregistration.
 type ActualStateOfWorldUpdater interface {
 // AddPlugin add the given plugin in the cache if no existing plugin
 // in the cache has the same socket path.
@@ -59,7 +59,7 @@ type Manager interface {
 // whether the pod is found.
 GetPodByUID(types.UID) (*v1.Pod, bool)
 // GetPodByMirrorPod returns the static pod for the given mirror pod and
-// whether it was known to the pod manger.
+// whether it was known to the pod manager.
 GetPodByMirrorPod(*v1.Pod) (*v1.Pod, bool)
 // GetMirrorPodByPod returns the mirror pod for the given static pod and
 // whether it was known to the pod manager.
@@ -672,7 +672,7 @@ func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(stats *runtimeapi
 }()

 if err != nil {
-// This should not happen. Log now to raise visiblity
+// This should not happen. Log now to raise visibility
 klog.Errorf("failed updating cpu usage nano core: %v", err)
 }
 return usage
@@ -335,9 +335,9 @@ func TestSyncPodNoDeadlock(t *testing.T) {
 client.AddReactor("*", "pods", func(action core.Action) (bool, runtime.Object, error) {
 switch action := action.(type) {
 case core.GetAction:
-assert.Equal(t, pod.Name, action.GetName(), "Unexpeted GetAction: %+v", action)
+assert.Equal(t, pod.Name, action.GetName(), "Unexpected GetAction: %+v", action)
 case core.UpdateAction:
-assert.Equal(t, pod.Name, action.GetObject().(*v1.Pod).Name, "Unexpeted UpdateAction: %+v", action)
+assert.Equal(t, pod.Name, action.GetObject().(*v1.Pod).Name, "Unexpected UpdateAction: %+v", action)
 default:
 assert.Fail(t, "Unexpected Action: %+v", action)
 }
@@ -389,7 +389,7 @@ func TestCacheInvalidation(t *testing.T) {
 },
 }
 manager.RegisterPod(podWithSecrets("ns1", "name1", s1))
-// Fetch both secrets - this should triggger get operations.
+// Fetch both secrets - this should trigger get operations.
 store.Get("ns1", "s1")
 store.Get("ns1", "s10")
 store.Get("ns1", "s2")
@@ -62,7 +62,7 @@ import (
 "github.com/stretchr/testify/assert"
 )

-// setUp is a convience function for setting up for (most) tests.
+// setUp is a convenience function for setting up for (most) tests.
 func setUp(t *testing.T) (*etcd3testing.EtcdTestServer, Config, *assert.Assertions) {
 server, storageConfig := etcd3testing.NewUnsecuredEtcd3TestClientServer(t)

@@ -84,7 +84,7 @@ type KubeProxyWinkernelConfiguration struct {
 // networkName is the name of the network kube-proxy will use
 // to create endpoints and policies
 NetworkName string
-// sourceVip is the IP address of the source VIP endoint used for
+// sourceVip is the IP address of the source VIP endpoint used for
 // NAT when loadbalancing
 SourceVip string
 // enableDSR tells kube-proxy whether HNS policies should be created
@@ -152,7 +152,7 @@ func (podStrategyWithoutGraceful) CheckGracefulDelete(ctx context.Context, obj r
 return false
 }

-// StrategyWithoutGraceful implements the legacy instant delele behavior.
+// StrategyWithoutGraceful implements the legacy instant delete behavior.
 var StrategyWithoutGraceful = podStrategyWithoutGraceful{Strategy}

 type podStatusStrategy struct {
@@ -161,7 +161,7 @@ type PolicyData struct {

 func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc {
 return func(hookContext genericapiserver.PostStartHookContext) error {
-// intializing roles is really important. On some e2e runs, we've seen cases where etcd is down when the server
+// initializing roles is really important. On some e2e runs, we've seen cases where etcd is down when the server
 // starts, the roles don't initialize, and nothing works.
 err := wait.Poll(1*time.Second, 30*time.Second, func() (done bool, err error) {

@@ -34,7 +34,7 @@ import (
 )

 type AuthorizationRuleResolver interface {
-// GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namepsace
+// GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namespace
 // of the role binding, the empty string if a cluster role binding.
 GetRoleReferenceRules(roleRef rbacv1.RoleRef, namespace string) ([]rbacv1.PolicyRule, error)

@@ -114,7 +114,7 @@ func TestVolumeAttachmentStrategy(t *testing.T) {
 Strategy.PrepareForUpdate(ctx, statusVolumeAttachment, volumeAttachment)

 if !apiequality.Semantic.DeepEqual(statusVolumeAttachment, volumeAttachment) {
-t.Errorf("unexpected objects difference after modfying status: %v", diff.ObjectDiff(statusVolumeAttachment, volumeAttachment))
+t.Errorf("unexpected objects difference after modifying status: %v", diff.ObjectDiff(statusVolumeAttachment, volumeAttachment))
 }
 }

@@ -1037,7 +1037,7 @@ func NewServiceAffinityPredicate(nodeInfoLister schedulerlisters.NodeInfoLister,
 }

 // checkServiceAffinity is a predicate which matches nodes in such a way to force that
-// ServiceAffinity.labels are homogenous for pods that are scheduled to a node.
+// ServiceAffinity.labels are homogeneous for pods that are scheduled to a node.
 // (i.e. it returns true IFF this pod can be added to this node such that all other pods in
 // the same service are running on nodes with the exact same ServiceAffinity.label values).
 //
@@ -4005,7 +4005,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 "nodeA": false,
 "nodeB": false,
 },
-name: "Test incoming pod's affinity: firstly check if all affinityTerms match, and then check if all topologyKeys match, and the match logic should be satified on the same pod",
+name: "Test incoming pod's affinity: firstly check if all affinityTerms match, and then check if all topologyKeys match, and the match logic should be satisfied on the same pod",
 },
 }

@@ -4758,7 +4758,7 @@ func TestEvenPodsSpreadPredicate_SingleConstraint(t *testing.T) {
 },
 },
 {
-name: "existing pods with mis-matched namespace doens't count",
+name: "existing pods with mis-matched namespace doesn't count",
 pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint(
 1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(),
 ).Obj(),
@@ -357,7 +357,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 },
 expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
-name: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
+name: "Affinity symmetry: considered only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
 },
 {
 pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
@@ -371,7 +371,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 },
 expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
-name: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
+name: "Affinity symmetry: considered RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
 },

 // The pod to schedule prefer to stay away from some existing pods at node level using the pod anti affinity.
@@ -1426,7 +1426,7 @@ func TestMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 },
 wantStatuses: []*framework.Status{unschedulableAndUnresolvable, unschedulableAndUnresolvable},
-name: "Test incoming pod's affinity: firstly check if all affinityTerms match, and then check if all topologyKeys match, and the match logic should be satified on the same pod",
+name: "Test incoming pod's affinity: firstly check if all affinityTerms match, and then check if all topologyKeys match, and the match logic should be satisfied on the same pod",
 },
 }

@@ -77,7 +77,7 @@ func TestPodTopologySpread_Filter_SingleConstraint(t *testing.T) {
 },
 },
 {
-name: "existing pods with mis-matched namespace doens't count",
+name: "existing pods with mis-matched namespace doesn't count",
 pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint(
 1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(),
 ).Obj(),
pkg/scheduler/internal/cache/cache_test.go (vendored, 4 changes)
@@ -37,7 +37,7 @@ import (

 func deepEqualWithoutGeneration(t *testing.T, testcase int, actual *nodeInfoListItem, expected *schedulernodeinfo.NodeInfo) {
 if (actual == nil) != (expected == nil) {
-t.Error("One of the actual or expeted is nil and the other is not!")
+t.Error("One of the actual or expected is nil and the other is not!")
 }
 // Ignore generation field.
 if actual != nil {
@@ -386,7 +386,7 @@ func TestSnapshot(t *testing.T) {

 snapshot := cache.Snapshot()
 if len(snapshot.Nodes) != len(cache.nodes) {
-t.Errorf("Unequal number of nodes in the cache and its snapshot. expeted: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
+t.Errorf("Unequal number of nodes in the cache and its snapshot. expected: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
 }
 for name, ni := range snapshot.Nodes {
 nItem := cache.nodes[name]