Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-29 06:27:05 +00:00)
Merge pull request #101302 from wangyx1992/capatial-log-kubelet
cleanup: fix errors in wrapped format and log capitalization in kubelet
This commit is contained in: commit 0cc2bf2931
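For readers skimming the diff below: the substantive change is switching fmt.Errorf verbs from %v to %w, which wraps the underlying error instead of flattening it to text, so callers can still match it with errors.Is or errors.As. A minimal sketch of the difference (not part of this commit; errLockHeld is a hypothetical sentinel error):

```go
package main

import (
	"errors"
	"fmt"
)

// errLockHeld is a hypothetical sentinel error standing in for a real
// failure returned by a lower layer (for example a file-lock package).
var errLockHeld = errors.New("lock already held")

func acquire() error { return errLockHeld }

func main() {
	// %v flattens the cause into plain text; the error chain is lost.
	flat := fmt.Errorf("unable to acquire file lock: %v", acquire())
	// %w keeps the cause in the chain; errors.Is can still find it.
	wrapped := fmt.Errorf("unable to acquire file lock: %w", acquire())

	fmt.Println(errors.Is(flat, errLockHeld))    // false
	fmt.Println(errors.Is(wrapped, errLockHeld)) // true
}
```

The capitalization hunks follow the Go convention that error strings start lowercase, since they are usually embedded in longer wrapped messages.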
@@ -441,10 +441,10 @@ func Run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 	// To help debugging, immediately log version
 	klog.InfoS("Kubelet version", "kubeletVersion", version.Get())
 	if err := initForOS(s.KubeletFlags.WindowsService, s.KubeletFlags.WindowsPriorityClass); err != nil {
-		return fmt.Errorf("failed OS init: %v", err)
+		return fmt.Errorf("failed OS init: %w", err)
 	}
 	if err := run(ctx, s, kubeDeps, featureGate); err != nil {
-		return fmt.Errorf("failed to run Kubelet: %v", err)
+		return fmt.Errorf("failed to run Kubelet: %w", err)
 	}
 	return nil
 }
@@ -500,11 +500,11 @@ func getReservedCPUs(machineInfo *cadvisorapi.MachineInfo, cpus string) (cpuset.
 
 	topo, err := topology.Discover(machineInfo)
 	if err != nil {
-		return emptyCPUSet, fmt.Errorf("Unable to discover CPU topology info: %s", err)
+		return emptyCPUSet, fmt.Errorf("unable to discover CPU topology info: %s", err)
 	}
 	reservedCPUSet, err := cpuset.Parse(cpus)
 	if err != nil {
-		return emptyCPUSet, fmt.Errorf("Unable to parse reserved-cpus list: %s", err)
+		return emptyCPUSet, fmt.Errorf("unable to parse reserved-cpus list: %s", err)
 	}
 	allCPUSet := topo.CPUDetails.CPUs()
 	if !reservedCPUSet.IsSubsetOf(allCPUSet) {
@@ -532,7 +532,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 	if s.LockFilePath != "" {
 		klog.InfoS("Acquiring file lock", "path", s.LockFilePath)
 		if err := flock.Acquire(s.LockFilePath); err != nil {
-			return fmt.Errorf("unable to acquire file lock on %q: %v", s.LockFilePath, err)
+			return fmt.Errorf("unable to acquire file lock on %q: %w", s.LockFilePath, err)
 		}
 		if s.ExitOnLockContention {
 			klog.InfoS("Watching for inotify events", "path", s.LockFilePath)
@@ -608,7 +608,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 
 		kubeDeps.KubeClient, err = clientset.NewForConfig(clientConfig)
 		if err != nil {
-			return fmt.Errorf("failed to initialize kubelet client: %v", err)
+			return fmt.Errorf("failed to initialize kubelet client: %w", err)
 		}
 
 		// make a separate client for events
@@ -617,7 +617,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 		eventClientConfig.Burst = int(s.EventBurst)
 		kubeDeps.EventClient, err = v1core.NewForConfig(&eventClientConfig)
 		if err != nil {
-			return fmt.Errorf("failed to initialize kubelet event client: %v", err)
+			return fmt.Errorf("failed to initialize kubelet event client: %w", err)
 		}
 
 		// make a separate client for heartbeat with throttling disabled and a timeout attached
@@ -632,7 +632,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 		heartbeatClientConfig.QPS = float32(-1)
 		kubeDeps.HeartbeatClient, err = clientset.NewForConfig(&heartbeatClientConfig)
 		if err != nil {
-			return fmt.Errorf("failed to initialize kubelet heartbeat client: %v", err)
+			return fmt.Errorf("failed to initialize kubelet heartbeat client: %w", err)
 		}
 	}
 
@@ -912,7 +912,7 @@ func buildKubeletClientConfig(ctx context.Context, s *options.KubeletServer, nod
 		&clientcmd.ConfigOverrides{},
 	).ClientConfig()
 	if err != nil {
-		return nil, nil, fmt.Errorf("invalid kubeconfig: %v", err)
+		return nil, nil, fmt.Errorf("invalid kubeconfig: %w", err)
 	}
 
 	kubeClientConfigOverrides(s, clientConfig)
@@ -986,7 +986,7 @@ func getNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName
 
 	nodeName, err := instances.CurrentNodeName(context.TODO(), hostname)
 	if err != nil {
-		return "", fmt.Errorf("error fetching current node name from cloud provider: %v", err)
+		return "", fmt.Errorf("error fetching current node name from cloud provider: %w", err)
 	}
 
 	klog.V(2).InfoS("Cloud provider determined current node", "nodeName", klog.KRef("", string(nodeName)))
@@ -1012,7 +1012,7 @@ func InitializeTLS(kf *options.KubeletFlags, kc *kubeletconfiginternal.KubeletCo
 		}
 		cert, key, err := certutil.GenerateSelfSignedCertKey(hostName, nil, nil)
 		if err != nil {
-			return nil, fmt.Errorf("unable to generate self signed cert: %v", err)
+			return nil, fmt.Errorf("unable to generate self signed cert: %w", err)
 		}
 
 		if err := certutil.WriteCert(kc.TLSCertFile, cert); err != nil {
@@ -1060,7 +1060,7 @@ func InitializeTLS(kf *options.KubeletFlags, kc *kubeletconfiginternal.KubeletCo
 	if len(kc.Authentication.X509.ClientCAFile) > 0 {
 		clientCAs, err := certutil.NewPool(kc.Authentication.X509.ClientCAFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to load client CA file %s: %v", kc.Authentication.X509.ClientCAFile, err)
+			return nil, fmt.Errorf("unable to load client CA file %s: %w", kc.Authentication.X509.ClientCAFile, err)
 		}
 		// Specify allowed CAs for client certificates
 		tlsOptions.Config.ClientCAs = clientCAs
@@ -1168,7 +1168,7 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
 		kubeServer.SeccompProfileRoot,
 		kubeServer.NodeStatusMaxImages)
 	if err != nil {
-		return fmt.Errorf("failed to create kubelet: %v", err)
+		return fmt.Errorf("failed to create kubelet: %w", err)
 	}
 
 	// NewMainKubelet should have set up a pod source config if one didn't exist
@@ -1185,7 +1185,7 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
 	// process pods and exit.
 	if runOnce {
 		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
-			return fmt.Errorf("runonce failed: %v", err)
+			return fmt.Errorf("runonce failed: %w", err)
 		}
 		klog.InfoS("Started kubelet as runonce")
 	} else {
@@ -1329,7 +1329,7 @@ func BootstrapKubeletConfigController(dynamicConfigDir string, transform dynamic
 	c := dynamickubeletconfig.NewController(dir, transform)
 	kc, err := c.Bootstrap()
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to determine a valid configuration, error: %v", err)
+		return nil, nil, fmt.Errorf("failed to determine a valid configuration, error: %w", err)
 	}
 	return kc, c, nil
 }
@@ -29,12 +29,12 @@ func isAdmin() (bool, error) {
 	// Get current user
 	u, err := user.Current()
 	if err != nil {
-		return false, fmt.Errorf("Error retrieving current user: %s", err)
+		return false, fmt.Errorf("error retrieving current user: %s", err)
 	}
 	// Get IDs of group user is a member of
 	ids, err := u.GroupIds()
 	if err != nil {
-		return false, fmt.Errorf("Error retrieving group ids: %s", err)
+		return false, fmt.Errorf("error retrieving group ids: %s", err)
 	}
 
 	// Check for existence of BUILTIN\ADMINISTRATORS group id
@@ -61,7 +61,7 @@ func checkPermissions() error {
 		0, 0, 0, 0, 0, 0,
 		&sid)
 	if err != nil {
-		return fmt.Errorf("Error while checking for elevated permissions: %s", err)
+		return fmt.Errorf("error while checking for elevated permissions: %s", err)
 	}
 
 	//We must free the sid to prevent security token leaks
@@ -70,12 +70,12 @@ func checkPermissions() error {
 
 	userIsAdmin, err = isAdmin()
 	if err != nil {
-		return fmt.Errorf("Error while checking admin group membership: %s", err)
+		return fmt.Errorf("error while checking admin group membership: %s", err)
 	}
 
 	member, err := token.IsMember(sid)
 	if err != nil {
-		return fmt.Errorf("Error while checking for elevated permissions: %s", err)
+		return fmt.Errorf("error while checking for elevated permissions: %s", err)
 	}
 	if !member {
 		return fmt.Errorf("kubelet needs to run with administrator permissions. Run as admin is: %t, User in admin group: %t", member, userIsAdmin)