Merge pull request #122698 from daniel-hutao/feat-1

Code Cleanup: Redundant String Conversions and Spelling/Grammar Corrections
Kubernetes Prow Robot 2024-02-05 16:57:07 -08:00 committed by GitHub
commit 244fbf94fd
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 21 additions and 21 deletions

View File

@@ -63,7 +63,7 @@ type DataV1 struct {
}
// NewV1 returns an instance of Checkpoint, in V1 (k8s <= 1.19) format.
-// Users should avoid creating checkpoints in formats different than the most recent one,
+// Users should avoid creating checkpoints in formats different from the most recent one,
// use the old formats only to validate existing checkpoint and convert them to most recent
// format. The only exception should be test code.
func NewV1(devEntries []PodDevicesEntryV1,
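Note: the comment above recommends loading old-format checkpoints only to validate and convert them. A minimal sketch of that flow, using hypothetical simplified types and a hypothetical convertV1ToLatest helper (none of these are the real checkpoint API, whose NewV1 signature is truncated in this hunk):

package main

import "fmt"

// Hypothetical, simplified stand-ins for the real checkpoint structures.
type checkpointV1 struct{ PodDeviceEntries []string }
type checkpointLatest struct{ PodDeviceEntries []string }

// convertV1ToLatest illustrates "validate the old format, then convert to the
// most recent one"; it is not the real conversion code.
func convertV1ToLatest(old checkpointV1) (checkpointLatest, error) {
	if old.PodDeviceEntries == nil {
		return checkpointLatest{}, fmt.Errorf("invalid V1 checkpoint: no entries")
	}
	return checkpointLatest{PodDeviceEntries: old.PodDeviceEntries}, nil
}

func main() {
	old := checkpointV1{PodDeviceEntries: []string{"pod-a/ctr-a/vendor.example/gpu"}}
	cur, err := convertV1ToLatest(old)
	if err != nil {
		panic(err)
	}
	fmt.Printf("converted %d entries to the latest format\n", len(cur.PodDeviceEntries))
}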

View File

@@ -74,10 +74,10 @@ type ManagerImpl struct {
// allDevices holds all the devices currently registered to the device manager
allDevices ResourceDeviceInstances
-// healthyDevices contains all of the registered healthy resourceNames and their exported device IDs.
+// healthyDevices contains all the registered healthy resourceNames and their exported device IDs.
healthyDevices map[string]sets.Set[string]
-// unhealthyDevices contains all of the unhealthy devices and their exported device IDs.
+// unhealthyDevices contains all the unhealthy devices and their exported device IDs.
unhealthyDevices map[string]sets.Set[string]
// allocatedDevices contains allocated deviceIds, keyed by resourceName.
@@ -90,7 +90,7 @@ type ManagerImpl struct {
// List of NUMA Nodes available on the underlying machine
numaNodes []int
-// Store of Topology Affinties that the Device Manager can query.
+// Store of Topology Affinities that the Device Manager can query.
topologyAffinityStore topologymanager.Store
// devicesToReuse contains devices that can be reused as they have been allocated to
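Note: healthyDevices and unhealthyDevices above are maps from resource name to a generic string set. A small self-contained sketch of how such maps are typically maintained with k8s.io/apimachinery/pkg/util/sets (resource and device names are made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	healthyDevices := make(map[string]sets.Set[string])
	unhealthyDevices := make(map[string]sets.Set[string])

	// Register two healthy devices for a made-up resource name.
	const res = "vendor.example/gpu"
	healthyDevices[res] = sets.New[string]("dev-0", "dev-1")
	unhealthyDevices[res] = sets.New[string]()

	// A device going unhealthy moves between the two sets.
	healthyDevices[res].Delete("dev-1")
	unhealthyDevices[res].Insert("dev-1")

	// sets.List returns a sorted slice, convenient for logging.
	fmt.Println("healthy:", sets.List(healthyDevices[res]))
	fmt.Println("unhealthy:", sets.List(unhealthyDevices[res]))
}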
@@ -560,12 +560,12 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
// This can happen if a container restarts for example.
devices := m.podDevices.containerDevices(podUID, contName, resource)
if devices != nil {
-klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", string(podUID), "devices", sets.List(devices))
+klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", podUID, "devices", sets.List(devices))
needed = needed - devices.Len()
// A pod's resource is not expected to change once admitted by the API server,
// so just fail loudly here. We can revisit this part if this no longer holds.
if needed != 0 {
-return nil, fmt.Errorf("pod %q container %q changed request for resource %q from %d to %d", string(podUID), contName, resource, devices.Len(), required)
+return nil, fmt.Errorf("pod %q container %q changed request for resource %q from %d to %d", podUID, contName, resource, devices.Len(), required)
}
}
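Note: the string(podUID) to podUID edits in this hunk are safe because podUID is declared as a plain string parameter here, so the conversion was a no-op; klog.InfoS and fmt.Errorf accept the value directly. A minimal, self-contained sketch of the same structured-logging and error-wrapping calls (all values are made up):

package main

import (
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func logPreAllocated(podUID, contName, resource string, devices []string) error {
	// podUID is already a string, so wrapping it in string(...) adds nothing.
	klog.V(3).InfoS("Found pre-allocated devices for resource on pod",
		"resourceName", resource, "containerName", contName,
		"podUID", podUID, "devices", devices)
	if len(devices) == 0 {
		return fmt.Errorf("pod %q container %q has no devices for resource %q",
			podUID, contName, resource)
	}
	return nil
}

func main() {
	klog.InitFlags(nil)
	flag.Set("v", "3")
	flag.Parse()
	defer klog.Flush()

	_ = logPreAllocated("1234-uid", "ctr", "vendor.example/gpu", []string{"dev-0"})
}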
@@ -576,16 +576,16 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
// note: if we get this far the runtime is surely running. This is usually enforced at OS level by startup system services dependencies.
// First we take care of the exceptional flow (scenarios 2 and 3). In both flows, kubelet is reinitializing, and while kubelet is initializing, sources are NOT all ready.
-// Is this a simple kubelet restart (scenario 2)? To distinguish, we use the informations we got for runtime. If we are asked to allocate devices for containers reported
+// Is this a simple kubelet restart (scenario 2)? To distinguish, we use the information we got for runtime. If we are asked to allocate devices for containers reported
// running, then it can only be a kubelet restart. On node reboot the runtime and the containers were also shut down. Then, if the container was running, it can only be
// because it already has access to all the required devices, so we got nothing to do and we can bail out.
if !m.sourcesReady.AllReady() && m.isContainerAlreadyRunning(podUID, contName) {
-klog.V(3).InfoS("container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", string(podUID), "containerName", contName)
+klog.V(3).InfoS("container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
return nil, nil
}
// We dealt with scenario 2. If we got this far it's either scenario 3 (node reboot) or scenario 1 (steady state, normal flow).
-klog.V(3).InfoS("Need devices to allocate for pod", "deviceNumber", needed, "resourceName", resource, "podUID", string(podUID), "containerName", contName)
+klog.V(3).InfoS("Need devices to allocate for pod", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
healthyDevices, hasRegistered := m.healthyDevices[resource]
// The following checks are expected to fail only happen on scenario 3 (node reboot).
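Note: the comment block in this hunk describes an early-return gate: during a plain kubelet restart the container is still running and already holds its devices, so there is nothing to allocate. A self-contained sketch of that decision only (the sourcesReady interface and static values below are stand-ins, not the real kubelet types):

package main

import "fmt"

// Stand-in for the real sources-ready check the kubelet uses.
type sourcesReady interface{ AllReady() bool }

type staticSources struct{ ready bool }

func (s staticSources) AllReady() bool { return s.ready }

// shouldAllocate mirrors the gate described above: if sources are not all
// ready and the container is already reported running (scenario 2, kubelet
// restart), bail out; otherwise allocate only if devices are still needed.
func shouldAllocate(src sourcesReady, containerRunning bool, needed int) bool {
	if !src.AllReady() && containerRunning {
		return false // scenario 2: simple kubelet restart, nothing to do
	}
	return needed > 0 // scenario 1 (steady state) or 3 (node reboot)
}

func main() {
	fmt.Println(shouldAllocate(staticSources{ready: false}, true, 2)) // false
	fmt.Println(shouldAllocate(staticSources{ready: true}, false, 2)) // true
}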
@@ -611,7 +611,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
// We handled the known error paths in scenario 3 (node reboot), so from now on we can fall back in a common path.
// We cover container restart on kubelet steady state with the same flow.
if needed == 0 {
-klog.V(3).InfoS("no devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", string(podUID), "containerName", contName)
+klog.V(3).InfoS("no devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
// No change, no work.
return nil, nil
}
@@ -676,7 +676,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
}
// If we can't allocate all remaining devices from the set of aligned ones,
-// then start by first allocating all of the aligned devices (to ensure
+// then start by first allocating all the aligned devices (to ensure
// that the alignment guaranteed by the TopologyManager is honored).
if allocateRemainingFrom(aligned) {
return allocated, nil
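Note: a compact sketch of the aligned-first policy this hunk describes, with plain slices standing in for the real allocator state: drain the NUMA-aligned pool before touching unaligned devices so TopologyManager alignment is preserved.

package main

import "fmt"

// allocateAlignedFirst illustrates the policy only: take devices from the
// aligned pool first, then top up from the remaining pool if needed.
func allocateAlignedFirst(aligned, unaligned []string, needed int) []string {
	allocated := make([]string, 0, needed)
	for _, pool := range [][]string{aligned, unaligned} {
		for _, dev := range pool {
			if len(allocated) == needed {
				return allocated
			}
			allocated = append(allocated, dev)
		}
	}
	return allocated // may be short if not enough devices exist
}

func main() {
	aligned := []string{"dev-0", "dev-1"}
	unaligned := []string{"dev-4", "dev-5"}
	fmt.Println(allocateAlignedFirst(aligned, unaligned, 3)) // [dev-0 dev-1 dev-4]
}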
@@ -734,7 +734,7 @@ func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, availa
}
}
-// Get a flat list of all of the nodes associated with available devices.
+// Get a flat list of all the nodes associated with available devices.
var nodes []int
for node := range perNodeDevices {
nodes = append(nodes, node)
@@ -973,12 +973,12 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s
devices := m.podDevices.containerDevices(podUID, contName, resource)
if devices == nil {
m.mutex.Unlock()
-return fmt.Errorf("no devices found allocated in local cache for pod %s, container %s, resource %s", string(podUID), contName, resource)
+return fmt.Errorf("no devices found allocated in local cache for pod %s, container %s, resource %s", podUID, contName, resource)
}
m.mutex.Unlock()
devs := devices.UnsortedList()
-klog.V(4).InfoS("Issuing a PreStartContainer call for container", "containerName", contName, "podUID", string(podUID))
+klog.V(4).InfoS("Issuing a PreStartContainer call for container", "containerName", contName, "podUID", podUID)
_, err := eI.e.preStartContainer(devs)
if err != nil {
return fmt.Errorf("device plugin PreStartContainer rpc failed with err: %v", err)
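Note: this hunk releases the manager mutex before issuing the (potentially slow) PreStartContainer gRPC call. A generic sketch of that pattern, with a toy cache and a callback standing in for the real device-plugin endpoint:

package main

import (
	"fmt"
	"sync"
	"time"
)

type cache struct {
	mu      sync.Mutex
	devices map[string][]string // key: podUID/container/resource (simplified)
}

// preStart looks up the cached allocation under the lock, then drops the lock
// before the blocking call so other allocations are not stalled behind the RPC.
func (c *cache) preStart(key string, rpc func(devs []string) error) error {
	c.mu.Lock()
	devs, ok := c.devices[key]
	c.mu.Unlock()
	if !ok {
		return fmt.Errorf("no devices found allocated in local cache for %s", key)
	}
	return rpc(devs)
}

func main() {
	c := &cache{devices: map[string][]string{"pod-a/ctr/gpu": {"dev-0"}}}
	err := c.preStart("pod-a/ctr/gpu", func(devs []string) error {
		time.Sleep(10 * time.Millisecond) // simulate the PreStartContainer RPC
		fmt.Println("prestart for", devs)
		return nil
	})
	fmt.Println("err:", err)
}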
@@ -1001,7 +1001,7 @@ func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, re
}
m.mutex.Unlock()
-klog.V(4).InfoS("Issuing a GetPreferredAllocation call for container", "containerName", contName, "podUID", string(podUID))
+klog.V(4).InfoS("Issuing a GetPreferredAllocation call for container", "containerName", contName, "podUID", podUID)
resp, err := eI.e.getPreferredAllocation(available.UnsortedList(), mustInclude.UnsortedList(), size)
m.mutex.Lock()
if err != nil {

View File

@@ -89,7 +89,7 @@ func (s *server) Start() error {
}
}
-// For now we leave cleanup of the *entire* directory up to the Handler
+// For now, we leave cleanup of the *entire* directory up to the Handler
// (even though we should in theory be able to just wipe the whole directory)
// because the Handler stores its checkpoint file (amongst others) in here.
if err := s.rhandler.CleanupPluginDirectory(s.socketDir); err != nil {
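Note: a hedged sketch of what such a cleanup typically looks like; this is not the real CleanupPluginDirectory implementation, and the checkpoint file name below is illustrative only. It removes stale entries from the socket directory while leaving the handler's checkpoint file alone, matching the comment above.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cleanupPluginDirectory is an illustrative stand-in: it deletes everything in
// dir except the named checkpoint file, which the handler still needs.
func cleanupPluginDirectory(dir, checkpointName string) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if e.Name() == checkpointName {
			continue // keep the handler's checkpoint file
		}
		if err := os.RemoveAll(filepath.Join(dir, e.Name())); err != nil {
			return fmt.Errorf("failed to remove %s: %w", e.Name(), err)
		}
	}
	return nil
}

func main() {
	dir, _ := os.MkdirTemp("", "plugins")
	defer os.RemoveAll(dir)
	os.WriteFile(filepath.Join(dir, "stale.sock"), nil, 0o600)
	os.WriteFile(filepath.Join(dir, "checkpoint"), nil, 0o600)
	fmt.Println(cleanupPluginDirectory(dir, "checkpoint"))
}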

View File

@@ -163,7 +163,7 @@ func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName strin
}
}
-// Returns all of devices allocated to the pods being tracked, keyed by resourceName.
+// Returns all devices allocated to the pods being tracked, keyed by resourceName.
func (pdev *podDevices) devices() map[string]sets.Set[string] {
ret := make(map[string]sets.Set[string])
pdev.RLock()
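Note: the body of devices() is truncated in this hunk. A self-contained sketch of the accumulation pattern the comment describes (the nested per-pod map below is a simplification, not the real podDevices layout): walk every tracked allocation under a read lock and union the device IDs per resource name.

package main

import (
	"fmt"
	"sync"

	"k8s.io/apimachinery/pkg/util/sets"
)

type podDevices struct {
	sync.RWMutex
	// podUID -> resourceName -> device IDs (simplified from the real structure)
	devs map[string]map[string]sets.Set[string]
}

// devices returns all allocated device IDs keyed by resource name.
func (pdev *podDevices) devices() map[string]sets.Set[string] {
	ret := make(map[string]sets.Set[string])
	pdev.RLock()
	defer pdev.RUnlock()
	for _, byResource := range pdev.devs {
		for resource, ids := range byResource {
			if _, ok := ret[resource]; !ok {
				ret[resource] = sets.New[string]()
			}
			ret[resource] = ret[resource].Union(ids)
		}
	}
	return ret
}

func main() {
	p := &podDevices{devs: map[string]map[string]sets.Set[string]{
		"pod-a": {"vendor.example/gpu": sets.New[string]("dev-0")},
		"pod-b": {"vendor.example/gpu": sets.New[string]("dev-1")},
	}}
	fmt.Println(sets.List(p.devices()["vendor.example/gpu"])) // [dev-0 dev-1]
}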

View File

@@ -38,7 +38,7 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map
// Garbage collect any stranded device resources before providing TopologyHints
m.UpdateAllocatedDevices()
-// Loop through all device resources and generate TopologyHints for them..
+// Loop through all device resources and generate TopologyHints for them.
deviceHints := make(map[string][]topologymanager.TopologyHint)
accumulatedResourceRequests := m.getContainerDeviceRequest(container)
@@ -171,7 +171,7 @@ func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available set
minAffinitySize = mask.Count()
}
-// Then check to see if all of the reusable devices are part of the bitmask.
+// Then check to see if all the reusable devices are part of the bitmask.
numMatching := 0
for d := range reusable {
// Skip the device if it doesn't specify any topology info.
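Note: a small self-contained sketch of the counting step described in this hunk, with a plain node-set map standing in for the TopologyManager bitmask and made-up NUMA assignments: a reusable device counts as matching only if every NUMA node it reports is covered by the candidate mask, and devices without topology info are skipped.

package main

import "fmt"

// countMatching illustrates the check only; the real code operates on
// pluginapi topology info and a bitmask type.
func countMatching(reusable map[string][]int, mask map[int]bool) int {
	numMatching := 0
	for _, nodes := range reusable {
		if len(nodes) == 0 {
			continue // no topology info for this device
		}
		matches := true
		for _, n := range nodes {
			if !mask[n] {
				matches = false
				break
			}
		}
		if matches {
			numMatching++
		}
	}
	return numMatching
}

func main() {
	reusable := map[string][]int{"dev-0": {0}, "dev-1": {1}, "dev-2": nil}
	mask := map[int]bool{0: true}
	fmt.Println(countMatching(reusable, mask)) // 1
}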

View File

@@ -48,11 +48,11 @@ type PluginHandler interface {
// Validate returns an error if the information provided by
// the potential plugin is erroneous (unsupported version, ...)
ValidatePlugin(pluginName string, endpoint string, versions []string) error
-// RegisterPlugin is called so that the plugin can be register by any
+// RegisterPlugin is called so that the plugin can be registered by any
// plugin consumer
// Error encountered here can still be Notified to the plugin.
RegisterPlugin(pluginName, endpoint string, versions []string) error
-// DeRegister is called once the pluginwatcher observes that the socket has
+// DeRegisterPlugin is called once the pluginwatcher observes that the socket has
// been deleted.
DeRegisterPlugin(pluginName string)
}
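Note: the interface above is small enough to show a complete stub consumer. This is a hedged sketch against the signatures visible in the hunk, with the interface redefined locally so the example is self-contained; real kubelet handlers do considerably more (gRPC registration, version negotiation, and so on).

package main

import (
	"fmt"
	"log"
)

// PluginHandler as shown in the hunk above, copied locally for the sketch.
type PluginHandler interface {
	ValidatePlugin(pluginName string, endpoint string, versions []string) error
	RegisterPlugin(pluginName, endpoint string, versions []string) error
	DeRegisterPlugin(pluginName string)
}

// loggingHandler is a toy consumer that only records registrations.
type loggingHandler struct {
	registered map[string]string // pluginName -> endpoint
}

func (h *loggingHandler) ValidatePlugin(pluginName, endpoint string, versions []string) error {
	if len(versions) == 0 {
		return fmt.Errorf("plugin %s at %s reported no supported versions", pluginName, endpoint)
	}
	return nil
}

func (h *loggingHandler) RegisterPlugin(pluginName, endpoint string, versions []string) error {
	h.registered[pluginName] = endpoint
	log.Printf("registered %s (%v) at %s", pluginName, versions, endpoint)
	return nil
}

func (h *loggingHandler) DeRegisterPlugin(pluginName string) {
	delete(h.registered, pluginName)
	log.Printf("deregistered %s", pluginName)
}

func main() {
	var handler PluginHandler = &loggingHandler{registered: map[string]string{}}
	_ = handler.RegisterPlugin("vendor.example", "/var/lib/kubelet/plugins/vendor.sock", []string{"v1"})
	handler.DeRegisterPlugin("vendor.example")
}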