Merge pull request #98939 from yangjunmyfm192085/run-test16

Structured logging migration: migrate the dockershim and network-related log messages in kubelet to structured logging.
commit a6a66c3594
Kubernetes Prow Robot, 2021-03-03 03:31:33 -08:00 (committed by GitHub)
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
28 changed files with 188 additions and 191 deletions
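The change is one mechanical pattern applied across all 28 files: printf-style klog calls (Errorf, Warningf, Infof) become the structured klog v2 calls (ErrorS, InfoS) with a constant message and explicit key/value pairs. A minimal, self-contained sketch of the pattern, assuming klog v2; the values are placeholders, not taken from the diff:

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	err := errors.New("connection refused") // placeholder error
	apiVersion := "1.41"                    // placeholder value

	// Before: values interpolated into the format string.
	klog.Errorf("Unable to parse docker version %q: %v", apiVersion, err)

	// After: constant message, the error as a dedicated first argument,
	// remaining values as key/value pairs.
	klog.ErrorS(err, "Unable to parse docker version", "dockerVersion", apiVersion)

	// Warningf call sites become InfoS: structured klog has no warning level.
	klog.InfoS("Falling back to use the default driver", "cgroupDriver", "cgroupfs")

	// Verbosity gates carry over unchanged.
	klog.V(2).InfoS("Pulling image", "image", "k8s.gcr.io/pause:3.4.1", "progress", "50%")
}

Fatalf call sites get a related treatment (ErrorS plus an explicit os.Exit(1)); see the streaming server and ConnectToDockerOrDie hunks below.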


@ -88,19 +88,19 @@ func (m *containerManager) Start() error {
func (m *containerManager) doWork() {
v, err := m.client.Version()
if err != nil {
klog.Errorf("Unable to get docker version: %v", err)
klog.ErrorS(err, "Unable to get docker version")
return
}
version, err := utilversion.ParseGeneric(v.APIVersion)
if err != nil {
klog.Errorf("Unable to parse docker version %q: %v", v.APIVersion, err)
klog.ErrorS(err, "Unable to parse docker version", "dockerVersion", v.APIVersion)
return
}
// EnsureDockerInContainer does two things.
// 1. Ensure processes run in the cgroups if m.cgroupsManager is not nil.
// 2. Ensure processes have the OOM score applied.
if err := kubecm.EnsureDockerInContainer(version, dockerOOMScoreAdj, m.cgroupsManager); err != nil {
klog.Errorf("Unable to ensure the docker processes run in the desired containers: %v", err)
klog.ErrorS(err, "Unable to ensure the docker processes run in the desired containers")
}
}
@ -109,7 +109,7 @@ func createCgroupManager(name string) (cgroups.Manager, error) {
memoryCapacity, err := getMemoryCapacity()
if err != nil {
klog.Errorf("Failed to get the memory capacity on machine: %v", err)
klog.ErrorS(err, "Failed to get the memory capacity on machine")
} else {
memoryLimit = memoryCapacity * dockerMemoryLimitThresholdPercent / 100
}
@ -117,7 +117,7 @@ func createCgroupManager(name string) (cgroups.Manager, error) {
if err != nil || memoryLimit < minDockerMemoryLimit {
memoryLimit = minDockerMemoryLimit
}
klog.V(2).Infof("Configure resource-only container %q with memory limit: %d", name, memoryLimit)
klog.V(2).InfoS("Configure resource-only container with memory limit", "containerName", name, "memoryLimit", memoryLimit)
cg := &configs.Cgroup{
Parent: "/",


@ -73,7 +73,7 @@ func (ds *dockerService) ListContainers(_ context.Context, r *runtimeapi.ListCon
converted, err := toRuntimeAPIContainer(&c)
if err != nil {
klog.V(4).Infof("Unable to convert docker to runtime API container: %v", err)
klog.V(4).InfoS("Unable to convert docker to runtime API container", "err", err)
continue
}
@ -234,7 +234,7 @@ func (ds *dockerService) createContainerLogSymlink(containerID string) error {
}
if path == "" {
klog.V(5).Infof("Container %s log path isn't specified, will not create the symlink", containerID)
klog.V(5).InfoS("Container log path isn't specified, will not create the symlink", "containerID", containerID)
return nil
}
@ -242,7 +242,7 @@ func (ds *dockerService) createContainerLogSymlink(containerID string) error {
// Only create the symlink when container log path is specified and log file exists.
// Delete possibly existing file first
if err = ds.os.Remove(path); err == nil {
klog.Warningf("Deleted previously existing symlink file: %q", path)
klog.InfoS("Deleted previously existing symlink file", "path", path)
}
if err = ds.os.Symlink(realPath, path); err != nil {
return fmt.Errorf("failed to create symbolic link %q to the container log file %q for container %q: %v",
@ -251,14 +251,14 @@ func (ds *dockerService) createContainerLogSymlink(containerID string) error {
} else {
supported, err := ds.IsCRISupportedLogDriver()
if err != nil {
klog.Warningf("Failed to check supported logging driver by CRI: %v", err)
klog.InfoS("Failed to check supported logging driver by CRI", "err", err)
return nil
}
if supported {
klog.Warningf("Cannot create symbolic link because container log file doesn't exist!")
klog.InfoS("Cannot create symbolic link because container log file doesn't exist!")
} else {
klog.V(5).Infof("Unsupported logging driver by CRI")
klog.V(5).InfoS("Unsupported logging driver by CRI")
}
}
@ -371,7 +371,7 @@ func (ds *dockerService) ContainerStatus(_ context.Context, req *runtimeapi.Cont
if !libdocker.IsImageNotFoundError(err) {
return nil, fmt.Errorf("unable to inspect docker image %q while inspecting docker container %q: %v", r.Image, containerID, err)
}
klog.Warningf("ignore error image %q not found while inspecting docker container %q: %v", r.Image, containerID, err)
klog.InfoS("Ignore error image not found while inspecting docker container", "containerID", containerID, "image", r.Image, "err", err)
}
imageID := toPullableImageID(r.Image, ir)
@ -498,7 +498,7 @@ func (ds *dockerService) performPlatformSpecificContainerCleanupAndLogErrors(con
errors := ds.performPlatformSpecificContainerCleanup(cleanupInfo)
for _, err := range errors {
klog.Warningf("error when cleaning up after container %q: %v", containerNameOrID, err)
klog.InfoS("Error when cleaning up after container", "containerNameOrID", containerNameOrID, "err", err)
}
return errors
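The hunks above also show the convention for errors that are expected and non-fatal: the entry stays at info level and the error rides along as an "err" key instead of promoting the record to ErrorS. A hedged sketch of the distinction, with placeholder values:

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	err := errors.New("unsupported container state") // placeholder error

	// Expected, recoverable condition: stay at (verbose) info level and
	// attach the error as an ordinary "err" key/value pair.
	klog.V(4).InfoS("Unable to convert docker to runtime API container", "err", err)

	// Genuine failure: ErrorS records the error as a first-class field
	// at error severity.
	klog.ErrorS(err, "Failed to get docker info")
}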


@ -54,7 +54,7 @@ func (ds *dockerService) ListImages(_ context.Context, r *runtimeapi.ListImagesR
for _, i := range images {
apiImage, err := imageToRuntimeAPIImage(&i)
if err != nil {
klog.V(5).Infof("Failed to convert docker API image %+v to runtime API image: %v", i, err)
klog.V(5).InfoS("Failed to convert docker API image to runtime API image", "image", i, "err", err)
continue
}
result = append(result, apiImage)


@ -33,7 +33,7 @@ import (
func (ds *dockerService) ImageFsInfo(_ context.Context, _ *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) {
info, err := ds.client.Info()
if err != nil {
klog.Errorf("Failed to get docker info: %v", err)
klog.ErrorS(err, "Failed to get docker info")
return nil, err
}


@ -32,14 +32,14 @@ import (
func (ds *dockerService) ImageFsInfo(_ context.Context, _ *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) {
info, err := ds.client.Info()
if err != nil {
klog.Errorf("Failed to get docker info: %v", err)
klog.ErrorS(err, "Failed to get docker info")
return nil, err
}
statsClient := &winstats.StatsClient{}
fsinfo, err := statsClient.GetDirFsInfo(info.DockerRootDir)
if err != nil {
klog.Errorf("Failed to get dir fsInfo for %q: %v", info.DockerRootDir, err)
klog.ErrorS(err, "Failed to get fsInfo for dockerRootDir", "path", info.DockerRootDir)
return nil, err
}


@ -85,7 +85,7 @@ func (d *dockerService) GetContainerLogs(_ context.Context, pod *v1.Pod, contain
}
err = d.client.Logs(containerID.ID, opts, sopts)
if errors.Is(err, errMaximumWrite) {
klog.V(2).Infof("finished logs, hit byte limit %d", *logOptions.LimitBytes)
klog.V(2).InfoS("Finished logs, hit byte limit", "byteLimit", *logOptions.LimitBytes)
err = nil
}
return err


@ -233,12 +233,11 @@ func (ds *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopP
if checkpointErr != errors.ErrCheckpointNotFound {
err := ds.checkpointManager.RemoveCheckpoint(podSandboxID)
if err != nil {
klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, err)
klog.ErrorS(err, "Failed to delete corrupt checkpoint for sandbox", "podSandboxID", podSandboxID)
}
}
if libdocker.IsContainerNotFoundError(statusErr) {
klog.Warningf("Both sandbox container and checkpoint for id %q could not be found. "+
"Proceed without further sandbox information.", podSandboxID)
klog.InfoS("Both sandbox container and checkpoint could not be found. Proceed without further sandbox information.", "podSandboxID", podSandboxID)
} else {
return nil, utilerrors.NewAggregate([]error{
fmt.Errorf("failed to get checkpoint for sandbox %q: %v", podSandboxID, checkpointErr),
@ -272,7 +271,7 @@ func (ds *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopP
if err := ds.client.StopContainer(podSandboxID, defaultSandboxGracePeriod); err != nil {
// Do not return error if the container does not exist
if !libdocker.IsContainerNotFoundError(err) {
klog.Errorf("Failed to stop sandbox %q: %v", podSandboxID, err)
klog.ErrorS(err, "Failed to stop sandbox", "podSandboxID", podSandboxID)
errList = append(errList, err)
} else {
// remove the checkpoint for any sandbox that is not found in the runtime
@ -399,7 +398,7 @@ func (ds *dockerService) getIPs(podSandboxID string, sandbox *dockertypes.Contai
// If all else fails, warn but don't return an error, as pod status
// should generally not return anything except fatal errors
// FIXME: handle network errors by restarting the pod somehow?
klog.Warningf("failed to read pod IP from plugin/docker: %v", err)
klog.InfoS("Failed to read pod IP from plugin/docker", "err", err)
return ips
}
@ -532,7 +531,7 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod
if filter == nil {
checkpoints, err = ds.checkpointManager.ListCheckpoints()
if err != nil {
klog.Errorf("Failed to list checkpoints: %v", err)
klog.ErrorS(err, "Failed to list checkpoints")
}
}
@ -549,7 +548,7 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod
c := containers[i]
converted, err := containerToRuntimeAPISandbox(&c)
if err != nil {
klog.V(4).Infof("Unable to convert docker to runtime API sandbox %+v: %v", c, err)
klog.V(4).InfoS("Unable to convert docker to runtime API sandbox", "containerName", c.Names, "err", err)
continue
}
if filterOutReadySandboxes && converted.State == runtimeapi.PodSandboxState_SANDBOX_READY {
@ -569,11 +568,11 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod
checkpoint := NewPodSandboxCheckpoint("", "", &CheckpointData{})
err := ds.checkpointManager.GetCheckpoint(id, checkpoint)
if err != nil {
klog.Errorf("Failed to retrieve checkpoint for sandbox %q: %v", id, err)
klog.ErrorS(err, "Failed to retrieve checkpoint for sandbox", "sandboxID", id)
if err == errors.ErrCorruptCheckpoint {
err = ds.checkpointManager.RemoveCheckpoint(id)
if err != nil {
klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", id, err)
klog.ErrorS(err, "Failed to delete corrupt checkpoint for sandbox", "sandboxID", id)
}
}
continue
@ -719,14 +718,14 @@ func toCheckpointProtocol(protocol runtimeapi.Protocol) Protocol {
case runtimeapi.Protocol_SCTP:
return protocolSCTP
}
klog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
klog.InfoS("Unknown protocol, defaulting to TCP", "protocol", protocol)
return protocolTCP
}
// rewriteResolvFile rewrites resolv.conf file generated by docker.
func rewriteResolvFile(resolvFilePath string, dns []string, dnsSearch []string, dnsOptions []string) error {
if len(resolvFilePath) == 0 {
klog.Errorf("ResolvConfPath is empty.")
klog.ErrorS(nil, "ResolvConfPath is empty.")
return nil
}
@ -751,9 +750,9 @@ func rewriteResolvFile(resolvFilePath string, dns []string, dnsSearch []string,
resolvFileContentStr := strings.Join(resolvFileContent, "\n")
resolvFileContentStr += "\n"
klog.V(4).Infof("Will attempt to re-write config file %s with: \n%s", resolvFilePath, resolvFileContent)
klog.V(4).InfoS("Will attempt to re-write config file", "path", resolvFilePath, "fileContent", resolvFileContent)
if err := rewriteFile(resolvFilePath, resolvFileContentStr); err != nil {
klog.Errorf("resolv.conf could not be updated: %v", err)
klog.ErrorS(err, "Resolv.conf could not be updated")
return err
}
}
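One wrinkle in the rewriteResolvFile hunk above: the condition warrants an error-level record but there is no error object to attach, so nil is passed as the first argument to ErrorS. A sketch of that shape, assuming klog v2 (the actual rewrite logic is elided):

package main

import "k8s.io/klog/v2"

// rewriteResolvFileSketch mirrors the guard in rewriteResolvFile above:
// an error-level entry is wanted, but there is no error value, so nil
// goes in the error position and klog logs at error severity anyway.
func rewriteResolvFileSketch(resolvFilePath string) error {
	if len(resolvFilePath) == 0 {
		klog.ErrorS(nil, "ResolvConfPath is empty")
		return nil
	}
	// ... rewrite logic elided in this sketch ...
	return nil
}

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()
	_ = rewriteResolvFileSketch("")
}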


@ -22,6 +22,7 @@ import (
"context"
"fmt"
"net/http"
"os"
"path"
"path/filepath"
"runtime"
@ -238,7 +239,7 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon
// lead to retries of the same failure, so just fail hard.
return nil, err
}
klog.Infof("Hairpin mode set to %q", pluginSettings.HairpinMode)
klog.InfoS("Hairpin mode is set", "hairpinMode", pluginSettings.HairpinMode)
// dockershim currently only supports CNI plugins.
pluginSettings.PluginBinDirs = cni.SplitDirs(pluginSettings.PluginBinDirString)
@ -253,27 +254,27 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon
return nil, fmt.Errorf("didn't find compatible CNI plugin with given settings %+v: %v", pluginSettings, err)
}
ds.network = network.NewPluginManager(plug)
klog.Infof("Docker cri networking managed by %v", plug.Name())
klog.InfoS("Docker cri networking managed by the network plugin", "networkPluginName", plug.Name())
// skipping cgroup driver checks for Windows
if runtime.GOOS == "linux" {
// NOTE: cgroup driver is only detectable in docker 1.11+
cgroupDriver := defaultCgroupDriver
dockerInfo, err := ds.client.Info()
klog.Infof("Docker Info: %+v", dockerInfo)
klog.InfoS("Docker Info", "dockerInfo", dockerInfo)
if err != nil {
klog.Errorf("Failed to execute Info() call to the Docker client: %v", err)
klog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
klog.ErrorS(err, "Failed to execute Info() call to the Docker client")
klog.InfoS("Falling back to use the default driver", "cgroupDriver", cgroupDriver)
} else if len(dockerInfo.CgroupDriver) == 0 {
klog.Warningf("No cgroup driver is set in Docker")
klog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
klog.InfoS("No cgroup driver is set in Docker")
klog.InfoS("Falling back to use the default driver", "cgroupDriver", cgroupDriver)
} else {
cgroupDriver = dockerInfo.CgroupDriver
}
if len(kubeCgroupDriver) != 0 && kubeCgroupDriver != cgroupDriver {
return nil, fmt.Errorf("misconfiguration: kubelet cgroup driver: %q is different from docker cgroup driver: %q", kubeCgroupDriver, cgroupDriver)
}
klog.Infof("Setting cgroupDriver to %s", cgroupDriver)
klog.InfoS("Setting cgroupDriver", "cgroupDriver", cgroupDriver)
ds.cgroupDriver = cgroupDriver
}
@ -355,7 +356,7 @@ func (ds *dockerService) UpdateRuntimeConfig(_ context.Context, r *runtimeapi.Up
return &runtimeapi.UpdateRuntimeConfigResponse{}, nil
}
klog.Infof("docker cri received runtime config %+v", runtimeConfig)
klog.InfoS("Docker cri received runtime config", "runtimeConfig", runtimeConfig)
if ds.network != nil && runtimeConfig.NetworkConfig.PodCidr != "" {
event := make(map[string]interface{})
event[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = runtimeConfig.NetworkConfig.PodCidr
@ -388,7 +389,7 @@ func (ds *dockerService) GetPodPortMappings(podSandboxID string) ([]*hostport.Po
}
errRem := ds.checkpointManager.RemoveCheckpoint(podSandboxID)
if errRem != nil {
klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, errRem)
klog.ErrorS(errRem, "Failed to delete corrupt checkpoint for sandbox", "podSandboxID", podSandboxID)
}
return nil, err
}
@ -412,7 +413,8 @@ func (ds *dockerService) Start() error {
go func() {
if err := ds.streamingServer.Start(true); err != nil {
klog.Fatalf("Streaming server stopped unexpectedly: %v", err)
klog.ErrorS(err, "Streaming server stopped unexpectedly")
os.Exit(1)
}
}()
@ -425,7 +427,7 @@ func (ds *dockerService) initCleanup() {
errors := ds.platformSpecificContainerInitCleanup()
for _, err := range errors {
klog.Warningf("initialization error: %v", err)
klog.InfoS("Initialization error", "err", err)
}
}
@ -474,7 +476,7 @@ func (ds *dockerService) GenerateExpectedCgroupParent(cgroupParent string) (stri
cgroupParent = path.Base(cgroupParent)
}
}
klog.V(3).Infof("Setting cgroup parent to: %q", cgroupParent)
klog.V(3).InfoS("Setting cgroup parent", "cgroupParent", cgroupParent)
return cgroupParent, nil
}
@ -542,7 +544,7 @@ func toAPIProtocol(protocol Protocol) v1.Protocol {
case protocolSCTP:
return v1.ProtocolSCTP
}
klog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
klog.InfoS("Unknown protocol, defaulting to TCP", "protocol", protocol)
return v1.ProtocolTCP
}
@ -561,7 +563,7 @@ func effectiveHairpinMode(s *NetworkPluginSettings) error {
// This is not a valid combination, since promiscuous-bridge only works on kubenet. Users might be using the
// default values (from before the hairpin-mode flag existed) and we
// should keep the old behavior.
klog.Warningf("Hairpin mode set to %q but kubenet is not enabled, falling back to %q", s.HairpinMode, kubeletconfig.HairpinVeth)
klog.InfoS("Hairpin mode is set but kubenet is not enabled, falling back to HairpinVeth", "hairpinMode", s.HairpinMode)
s.HairpinMode = kubeletconfig.HairpinVeth
return nil
}
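Two call sites in this file drop klog.Fatalf, which is why the import hunk adds "os": structured klog offers no FatalS, so the migration logs with ErrorS and exits explicitly. A minimal sketch with a placeholder error (the Flush is an extra precaution, not part of the diff):

package main

import (
	"errors"
	"os"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)

	err := errors.New("listener closed") // placeholder failure

	// Before: klog.Fatalf logged the message and terminated the process
	// in one call. After: log at error severity, then exit explicitly.
	klog.ErrorS(err, "Streaming server stopped unexpectedly")
	klog.Flush()
	os.Exit(1)
}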


@ -40,14 +40,14 @@ func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.Cont
// That will typically happen with init-containers in Exited state. Docker still knows about them but the HCS does not.
// As we don't want to block stats retrieval for other containers, we only log errors.
if !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyStopped(err) {
klog.V(4).Infof("Error opening container (stats will be missing) '%s': %v", containerID, err)
klog.V(4).InfoS("Error opening container (stats will be missing)", "containerID", containerID, "err", err)
}
return nil, nil
}
defer func() {
closeErr := hcsshimContainer.Close()
if closeErr != nil {
klog.Errorf("Error closing container '%s': %v", containerID, closeErr)
klog.ErrorS(closeErr, "Error closing container", "containerID", containerID)
}
}()
@ -60,7 +60,7 @@ func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.Cont
// These hcs errors do not have helpers exposed in public package so need to query for the known codes
// https://github.com/microsoft/hcsshim/blob/master/internal/hcs/errors.go
// PR to expose helpers in hcsshim: https://github.com/microsoft/hcsshim/pull/933
klog.V(4).Infof("Container is not in a state that stats can be accessed '%s': %v. This occurs when the container is created but not started.", containerID, err)
klog.V(4).InfoS("Container is not in a state that stats can be accessed. This occurs when the container is created but not started.", "containerID", containerID, "err", err)
return nil, nil
}
return nil, err


@ -52,7 +52,7 @@ func (r *streamingRuntime) portForward(podSandboxID string, port int32, stream i
}
commandString := fmt.Sprintf("%s %s", nsenterPath, strings.Join(args, " "))
klog.V(4).Infof("executing port forwarding command: %s", commandString)
klog.V(4).InfoS("Executing port forwarding command", "command", commandString)
command := exec.Command(nsenterPath, args...)
command.Stdout = stream


@ -150,7 +150,7 @@ func (*NativeExecHandler) ExecInContainer(ctx context.Context, client libdocker.
retries++
if retries == maxRetries {
klog.Errorf("Exec session %s in container %s terminated but process still running!", execObj.ID, container.ID)
klog.ErrorS(nil, "Exec session in the container terminated but process still running!", "execSession", execObj.ID, "containerID", container.ID)
return nil
}


@ -147,7 +147,7 @@ func generateMountBindings(mounts []*runtimeapi.Mount) []string {
case runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER:
attrs = append(attrs, "rslave")
default:
klog.Warningf("unknown propagation mode for hostPath %q", m.HostPath)
klog.InfoS("Unknown propagation mode for hostPath", "path", m.HostPath)
// Falls back to "private"
}
@ -180,7 +180,7 @@ func makePortsAndBindings(pm []*runtimeapi.PortMapping) (dockernat.PortSet, map[
case runtimeapi.Protocol_SCTP:
protocol = "/sctp"
default:
klog.Warningf("Unknown protocol %q: defaulting to TCP", port.Protocol)
klog.InfoS("Unknown protocol, defaulting to TCP", "protocol", port.Protocol)
protocol = "/tcp"
}
@ -288,13 +288,13 @@ func recoverFromCreationConflictIfNeeded(client libdocker.Interface, createConfi
}
id := matches[1]
klog.Warningf("Unable to create pod sandbox due to conflict. Attempting to remove sandbox %q", id)
klog.InfoS("Unable to create pod sandbox due to conflict. Attempting to remove sandbox", "containerID", id)
rmErr := client.RemoveContainer(id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true})
if rmErr == nil {
klog.V(2).Infof("Successfully removed conflicting container %q", id)
klog.V(2).InfoS("Successfully removed conflicting container", "containerID", id)
return nil, err
}
klog.Errorf("Failed to remove the conflicting container %q: %v", id, rmErr)
klog.ErrorS(rmErr, "Failed to remove the conflicting container", "containerID", id)
// Return if the error is not container not found error.
if !libdocker.IsContainerNotFoundError(rmErr) {
return nil, err
@ -302,7 +302,7 @@ func recoverFromCreationConflictIfNeeded(client libdocker.Interface, createConfi
// randomize the name to avoid conflict.
createConfig.Name = randomizeName(createConfig.Name)
klog.V(2).Infof("Create the container with randomized name %s", createConfig.Name)
klog.V(2).InfoS("Create the container with the randomized name", "containerName", createConfig.Name)
return client.CreateContainer(createConfig)
}
@ -337,7 +337,7 @@ func ensureSandboxImageExists(client libdocker.Interface, image string) error {
keyring := credentialprovider.NewDockerKeyring()
creds, withCredentials := keyring.Lookup(repoToPull)
if !withCredentials {
klog.V(3).Infof("Pulling image %q without credentials", image)
klog.V(3).InfoS("Pulling the image without credentials", "image", image)
err := client.PullImage(image, dockertypes.AuthConfig{}, dockertypes.ImagePullOptions{})
if err != nil {


@ -33,12 +33,12 @@ func DefaultMemorySwap() int64 {
}
func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune) ([]string, error) {
klog.Warningf("getSecurityOpts is unsupported in this build")
klog.InfoS("getSecurityOpts is unsupported in this build")
return nil, nil
}
func (ds *dockerService) getSandBoxSecurityOpts(separator rune) []string {
klog.Warningf("getSandBoxSecurityOpts is unsupported in this build")
klog.InfoS("getSandBoxSecurityOpts is unsupported in this build")
return nil
}
@ -47,12 +47,12 @@ func (ds *dockerService) updateCreateConfig(
config *runtimeapi.ContainerConfig,
sandboxConfig *runtimeapi.PodSandboxConfig,
podSandboxID string, securityOptSep rune, apiVersion *semver.Version) error {
klog.Warningf("updateCreateConfig is unsupported in this build")
klog.InfoS("updateCreateConfig is unsupported in this build")
return nil
}
func (ds *dockerService) determinePodIPBySandboxID(uid string) []string {
klog.Warningf("determinePodIPBySandboxID is unsupported in this build")
klog.InfoS("determinePodIPBySandboxID is unsupported in this build")
return nil
}


@ -38,7 +38,7 @@ func DefaultMemorySwap() int64 {
func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune) ([]string, error) {
if seccompProfile != "" {
klog.Warningf("seccomp annotations are not supported on windows")
klog.InfoS("seccomp annotations are not supported on windows")
}
return nil, nil
}


@ -19,6 +19,7 @@ limitations under the License.
package libdocker
import (
"os"
"time"
dockertypes "github.com/docker/docker/api/types"
@ -74,7 +75,7 @@ type Interface interface {
// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec
func getDockerClient(dockerEndpoint string) (*dockerapi.Client, error) {
if len(dockerEndpoint) > 0 {
klog.Infof("Connecting to docker on %s", dockerEndpoint)
klog.InfoS("Connecting to docker on the dockerEndpoint", "endpoint", dockerEndpoint)
return dockerapi.NewClientWithOpts(dockerapi.WithHost(dockerEndpoint), dockerapi.WithVersion(""))
}
return dockerapi.NewClientWithOpts(dockerapi.FromEnv)
@ -89,8 +90,10 @@ func getDockerClient(dockerEndpoint string) (*dockerapi.Client, error) {
func ConnectToDockerOrDie(dockerEndpoint string, requestTimeout, imagePullProgressDeadline time.Duration) Interface {
client, err := getDockerClient(dockerEndpoint)
if err != nil {
klog.Fatalf("Couldn't connect to docker: %v", err)
klog.ErrorS(err, "Couldn't connect to docker")
os.Exit(1)
}
klog.Infof("Start docker client with request timeout=%v", requestTimeout)
klog.InfoS("Start docker client with request timeout", "timeout", requestTimeout)
return newKubeDockerClient(client, requestTimeout, imagePullProgressDeadline)
}


@ -44,7 +44,7 @@ func matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool {
// https://github.com/docker/distribution/blob/master/reference/reference.go#L4
named, err := dockerref.ParseNormalizedNamed(image)
if err != nil {
klog.V(4).Infof("couldn't parse image reference %q: %v", image, err)
klog.V(4).InfoS("Couldn't parse image reference", "image", image, "err", err)
return false
}
_, isTagged := named.(dockerref.Tagged)
@ -102,7 +102,7 @@ func matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool {
for _, repoDigest := range inspected.RepoDigests {
named, err := dockerref.ParseNormalizedNamed(repoDigest)
if err != nil {
klog.V(4).Infof("couldn't parse image RepoDigest reference %q: %v", repoDigest, err)
klog.V(4).InfoS("Couldn't parse image RepoDigest reference", "digest", repoDigest, "err", err)
continue
}
if d, isDigested := named.(dockerref.Digested); isDigested {
@ -116,14 +116,14 @@ func matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool {
// process the ID as a digest
id, err := godigest.Parse(inspected.ID)
if err != nil {
klog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err)
klog.V(4).InfoS("Couldn't parse image ID reference", "imageID", id, "err", err)
return false
}
if digest.Digest().Algorithm().String() == id.Algorithm().String() && digest.Digest().Hex() == id.Hex() {
return true
}
}
klog.V(4).Infof("Inspected image (%q) does not match %s", inspected.ID, image)
klog.V(4).InfoS("Inspected image ID does not match image", "inspectedImageID", inspected.ID, "image", image)
return false
}
@ -140,19 +140,19 @@ func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool {
// Otherwise, we should try actual parsing to be more correct
ref, err := dockerref.Parse(image)
if err != nil {
klog.V(4).Infof("couldn't parse image reference %q: %v", image, err)
klog.V(4).InfoS("Couldn't parse image reference", "image", image, "err", err)
return false
}
digest, isDigested := ref.(dockerref.Digested)
if !isDigested {
klog.V(4).Infof("the image reference %q was not a digest reference", image)
klog.V(4).InfoS("The image reference was not a digest reference", "image", image)
return false
}
id, err := godigest.Parse(inspected.ID)
if err != nil {
klog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err)
klog.V(4).InfoS("Couldn't parse image ID reference", "imageID", id, "err", err)
return false
}
@ -160,6 +160,6 @@ func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool {
return true
}
klog.V(4).Infof("The reference %s does not directly refer to the given image's ID (%q)", image, inspected.ID)
klog.V(4).InfoS("The image reference does not directly refer to the given image's ID", "image", image, "inspectedImageID", inspected.ID)
return false
}


@ -337,14 +337,14 @@ func (p *progressReporter) start() {
progress, timestamp := p.progress.get()
// If there is no progress for p.imagePullProgressDeadline, cancel the operation.
if time.Since(timestamp) > p.imagePullProgressDeadline {
klog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress)
klog.ErrorS(nil, "Cancel pulling image because of exceed image pull deadline, record latest progress", "image", p.image, "deadline", p.imagePullProgressDeadline, "progress", progress)
p.cancel()
return
}
klog.V(2).Infof("Pulling image %q: %q", p.image, progress)
klog.V(2).InfoS("Pulling image", "image", p.image, "progress", progress)
case <-p.stopCh:
progress, _ := p.progress.get()
klog.V(2).Infof("Stop pulling image %q: %q", p.image, progress)
klog.V(2).InfoS("Stop pulling image", "image", p.image, "progress", progress)
return
}
}


@ -170,30 +170,30 @@ func getDefaultCNINetwork(confDir string, binDirs []string) (*cniNetwork, error)
if strings.HasSuffix(confFile, ".conflist") {
confList, err = libcni.ConfListFromFile(confFile)
if err != nil {
klog.Warningf("Error loading CNI config list file %s: %v", confFile, err)
klog.InfoS("Error loading CNI config list file", "path", confFile, "err", err)
continue
}
} else {
conf, err := libcni.ConfFromFile(confFile)
if err != nil {
klog.Warningf("Error loading CNI config file %s: %v", confFile, err)
klog.InfoS("Error loading CNI config file", "path", confFile, "err", err)
continue
}
// Ensure the config has a "type" so we know what plugin to run.
// Also catches the case where somebody put a conflist into a conf file.
if conf.Network.Type == "" {
klog.Warningf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile)
klog.InfoS("Error loading CNI config file: no 'type'; perhaps this is a .conflist?", "path", confFile)
continue
}
confList, err = libcni.ConfListFromConf(conf)
if err != nil {
klog.Warningf("Error converting CNI config file %s to list: %v", confFile, err)
klog.InfoS("Error converting CNI config file to list", "path", confFile, "err", err)
continue
}
}
if len(confList.Plugins) == 0 {
klog.Warningf("CNI config list %s has no networks, skipping", string(confList.Bytes[:maxStringLengthInLog(len(confList.Bytes))]))
klog.InfoS("CNI config list has no networks, skipping", "configList", string(confList.Bytes[:maxStringLengthInLog(len(confList.Bytes))]))
continue
}
@ -201,11 +201,11 @@ func getDefaultCNINetwork(confDir string, binDirs []string) (*cniNetwork, error)
// all plugins of this config exist on disk
caps, err := cniConfig.ValidateNetworkList(context.TODO(), confList)
if err != nil {
klog.Warningf("Error validating CNI config list %s: %v", string(confList.Bytes[:maxStringLengthInLog(len(confList.Bytes))]), err)
klog.InfoS("Error validating CNI config list", "configList", string(confList.Bytes[:maxStringLengthInLog(len(confList.Bytes))]), "err", err)
continue
}
klog.V(4).Infof("Using CNI configuration file %s", confFile)
klog.V(4).InfoS("Using CNI configuration file", "path", confFile)
return &cniNetwork{
name: confList.Name,
@ -236,7 +236,7 @@ func (plugin *cniNetworkPlugin) Init(host network.Host, hairpinMode kubeletconfi
func (plugin *cniNetworkPlugin) syncNetworkConfig() {
network, err := getDefaultCNINetwork(plugin.confDir, plugin.binDirs)
if err != nil {
klog.Warningf("Unable to update cni config: %s", err)
klog.InfoS("Unable to update cni config", "err", err)
return
}
plugin.setDefaultNetwork(network)
@ -278,12 +278,12 @@ func (plugin *cniNetworkPlugin) Event(name string, details map[string]interface{
podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string)
if !ok {
klog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE)
klog.InfoS("The event didn't contain pod CIDR", "event", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE)
return
}
if plugin.podCidr != "" {
klog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR)
klog.InfoS("Ignoring subsequent pod CIDR update to new cidr", "podCIDR", podCIDR)
return
}
@ -330,7 +330,7 @@ func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id ku
// Lack of namespace should not be fatal on teardown
netnsPath, err := plugin.host.GetNetNS(id.ID)
if err != nil {
klog.Warningf("CNI failed to retrieve network namespace path: %v", err)
klog.InfoS("CNI failed to retrieve network namespace path", "err", err)
}
// Todo get the timeout from parent ctx
@ -340,54 +340,47 @@ func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id ku
if plugin.loNetwork != nil {
// Loopback network deletion failure should not be fatal on teardown
if err := plugin.deleteFromNetwork(cniTimeoutCtx, plugin.loNetwork, name, namespace, id, netnsPath, nil); err != nil {
klog.Warningf("CNI failed to delete loopback network: %v", err)
klog.InfoS("CNI failed to delete loopback network", "err", err)
}
}
return plugin.deleteFromNetwork(cniTimeoutCtx, plugin.getDefaultNetwork(), name, namespace, id, netnsPath, nil)
}
func podDesc(namespace, name string, id kubecontainer.ContainerID) string {
return fmt.Sprintf("%s_%s/%s", namespace, name, id.ID)
}
func (plugin *cniNetworkPlugin) addToNetwork(ctx context.Context, network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations, options map[string]string) (cnitypes.Result, error) {
rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath, annotations, options)
if err != nil {
klog.Errorf("Error adding network when building cni runtime conf: %v", err)
klog.ErrorS(err, "Error adding network when building cni runtime conf")
return nil, err
}
pdesc := podDesc(podNamespace, podName, podSandboxID)
netConf, cniNet := network.NetworkConfig, network.CNIConfig
klog.V(4).Infof("Adding %s to network %s/%s netns %q", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, podNetnsPath)
klog.V(4).InfoS("Adding pod to network", "pod", klog.KRef(podNamespace, podName), "podSandboxID", podSandboxID, "podNetnsPath", podNetnsPath, "networkType", netConf.Plugins[0].Network.Type, "networkName", netConf.Name)
res, err := cniNet.AddNetworkList(ctx, netConf, rt)
if err != nil {
klog.Errorf("Error adding %s to network %s/%s: %v", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, err)
klog.ErrorS(err, "Error adding pod to network", "pod", klog.KRef(podNamespace, podName), "podSandboxID", podSandboxID, "podNetnsPath", podNetnsPath, "networkType", netConf.Plugins[0].Network.Type, "networkName", netConf.Name)
return nil, err
}
klog.V(4).Infof("Added %s to network %s: %v", pdesc, netConf.Name, res)
klog.V(4).InfoS("Added pod to network", "pod", klog.KRef(podNamespace, podName), "podSandboxID", podSandboxID, "networkName", netConf.Name, "response", res)
return res, nil
}
func (plugin *cniNetworkPlugin) deleteFromNetwork(ctx context.Context, network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations map[string]string) error {
rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath, annotations, nil)
if err != nil {
klog.Errorf("Error deleting network when building cni runtime conf: %v", err)
klog.ErrorS(err, "Error deleting network when building cni runtime conf")
return err
}
pdesc := podDesc(podNamespace, podName, podSandboxID)
netConf, cniNet := network.NetworkConfig, network.CNIConfig
klog.V(4).Infof("Deleting %s from network %s/%s netns %q", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, podNetnsPath)
klog.V(4).InfoS("Deleting pod from network", "pod", klog.KRef(podNamespace, podName), "podSandboxID", podSandboxID, "podNetnsPath", podNetnsPath, "networkType", netConf.Plugins[0].Network.Type, "networkName", netConf.Name)
err = cniNet.DelNetworkList(ctx, netConf, rt)
// The pod may not get deleted successfully at the first time.
// Ignore "no such file or directory" error in case the network has already been deleted in previous attempts.
if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
klog.Errorf("Error deleting %s from network %s/%s: %v", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, err)
klog.ErrorS(err, "Error deleting pod from network", "pod", klog.KRef(podNamespace, podName), "podSandboxID", podSandboxID, "podNetnsPath", podNetnsPath, "networkType", netConf.Plugins[0].Network.Type, "networkName", netConf.Name)
return err
}
klog.V(4).Infof("Deleted %s from network %s/%s", pdesc, netConf.Plugins[0].Network.Type, netConf.Name)
klog.V(4).InfoS("Deleted pod from network", "pod", klog.KRef(podNamespace, podName), "podSandboxID", podSandboxID, "networkType", netConf.Plugins[0].Network.Type, "networkName", netConf.Name)
return nil
}
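The CNI hunks above switch from the hand-built podDesc string ("namespace_name/containerID") to klog.KRef, which emits a structured namespace/name reference, with the sandbox ID moved into its own key. A sketch with placeholder names, assuming klog v2:

package main

import "k8s.io/klog/v2"

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	// Before: a formatted pod description was assembled by hand.
	// After: klog.KRef builds a namespace/name reference that structured
	// backends can render or index uniformly. All values are placeholders.
	klog.V(4).InfoS("Adding pod to network",
		"pod", klog.KRef("kube-system", "coredns-558bd4d5db-abcde"),
		"podSandboxID", "3f2a9c1e",
		"networkType", "bridge",
		"networkName", "cbr0")
}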


@ -74,7 +74,7 @@ func findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceN
}
func setUpInterface(ifName string) error {
klog.V(3).Infof("Enabling hairpin on interface %s", ifName)
klog.V(3).InfoS("Enabling hairpin on interface", "interfaceName", ifName)
ifPath := path.Join(sysfsNetPath, ifName)
if _, err := os.Stat(ifPath); err != nil {
return err


@ -114,7 +114,7 @@ func openLocalPort(hp *hostport) (closeable, error) {
default:
return nil, fmt.Errorf("unknown protocol %q", hp.protocol)
}
klog.V(3).Infof("Opened local port %s", hp.String())
klog.V(3).InfoS("Opened local port", "port", hp.String())
return socket, nil
}
@ -130,7 +130,7 @@ func portMappingToHostport(portMapping *PortMapping, family ipFamily) hostport {
// ensureKubeHostportChains ensures the KUBE-HOSTPORTS chain is setup correctly
func ensureKubeHostportChains(iptables utiliptables.Interface, natInterfaceName string) error {
klog.V(4).Info("Ensuring kubelet hostport chains")
klog.V(4).InfoS("Ensuring kubelet hostport chains")
// Ensure kubeHostportChain
if _, err := iptables.EnsureChain(utiliptables.TableNAT, kubeHostportsChain); err != nil {
return fmt.Errorf("failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubeHostportsChain, err)


@ -69,7 +69,7 @@ func NewHostportManager(iptables utiliptables.Interface) HostPortManager {
}
h.conntrackFound = conntrack.Exists(h.execer)
if !h.conntrackFound {
klog.Warningf("The binary conntrack is not installed, this can cause failures in network connection cleanup.")
klog.InfoS("The binary conntrack is not installed, this can cause failures in network connection cleanup.")
}
return h
}
@ -189,11 +189,11 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt
// create a new conntrack entry without any DNAT. That will result in blackhole of the traffic even after correct
// iptables rules have been added back.
if hm.execer != nil && hm.conntrackFound {
klog.Infof("Starting to delete udp conntrack entries: %v, isIPv6 - %v", conntrackPortsToRemove, isIPv6)
klog.InfoS("Starting to delete udp conntrack entries", "conntrackEntries", conntrackPortsToRemove, "isIPv6", isIPv6)
for _, port := range conntrackPortsToRemove {
err = conntrack.ClearEntriesForPort(hm.execer, port, isIPv6, v1.ProtocolUDP)
if err != nil {
klog.Errorf("Failed to clear udp conntrack for port %d, error: %v", port, err)
klog.ErrorS(err, "Failed to clear udp conntrack for port", "port", port)
}
}
}
@ -268,7 +268,7 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er
// syncIPTables executes iptables-restore with given lines
func (hm *hostportManager) syncIPTables(lines []byte) error {
klog.V(3).Infof("Restoring iptables rules: %s", lines)
klog.V(3).InfoS("Restoring iptables rules", "iptableRules", lines)
err := hm.iptables.RestoreAll(lines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
return fmt.Errorf("failed to execute iptables-restore: %v", err)
@ -310,7 +310,7 @@ func (hm *hostportManager) openHostports(podPortMapping *PodPortMapping) (map[ho
if retErr != nil {
for hp, socket := range ports {
if err := socket.Close(); err != nil {
klog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podPortMapping), err)
klog.ErrorS(err, "Cannot clean up hostport for the pod", "podFullName", getPodFullName(podPortMapping), "port", hp.port)
}
}
return nil, retErr
@ -324,14 +324,14 @@ func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error
for _, pm := range hostportMappings {
hp := portMappingToHostport(pm, hm.getIPFamily())
if socket, ok := hm.hostPortMap[hp]; ok {
klog.V(2).Infof("Closing host port %s", hp.String())
klog.V(2).InfoS("Closing host port", "port", hp.String())
if err := socket.Close(); err != nil {
errList = append(errList, fmt.Errorf("failed to close host port %s: %v", hp.String(), err))
continue
}
delete(hm.hostPortMap, hp)
} else {
klog.V(5).Infof("host port %s does not have an open socket", hp.String())
klog.V(5).InfoS("Host port does not have an open socket", "port", hp.String())
}
}
return utilerrors.NewAggregate(errList)


@ -143,10 +143,10 @@ func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode kubeletc
if mtu == network.UseDefaultMTU {
if link, err := findMinMTU(); err == nil {
plugin.mtu = link.MTU
klog.V(5).Infof("Using interface %s MTU %d as bridge MTU", link.Name, link.MTU)
klog.V(5).InfoS("Using the interface MTU value as bridge MTU", "interfaceName", link.Name, "mtuValue", link.MTU)
} else {
plugin.mtu = fallbackMTU
klog.Warningf("Failed to find default bridge MTU, using %d: %v", fallbackMTU, err)
klog.InfoS("Failed to find default bridge MTU, using default value", "mtuValue", fallbackMTU, "err", err)
}
} else {
plugin.mtu = mtu
@ -161,7 +161,7 @@ func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode kubeletc
plugin.execer.Command("modprobe", "br-netfilter").CombinedOutput()
err := plugin.sysctl.SetSysctl(sysctlBridgeCallIPTables, 1)
if err != nil {
klog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err)
klog.InfoS("can't set sysctl bridge-nf-call-iptables", "err", err)
}
plugin.loConfig, err = libcni.ConfFromBytes([]byte(`{
@ -240,28 +240,28 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf
podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string)
if !ok {
klog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE)
klog.InfoS("The event didn't contain pod CIDR", "event", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE)
return
}
if plugin.netConfig != nil {
klog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR)
klog.InfoS("Ignoring subsequent pod CIDR update to new cidr", "podCIDR", podCIDR)
return
}
klog.V(4).Infof("kubenet: PodCIDR is set to %q", podCIDR)
klog.V(4).InfoS("Kubenet: PodCIDR is set to new value", "podCIDR", podCIDR)
podCIDRs := strings.Split(podCIDR, ",")
// reset to one cidr if dual stack is not enabled
if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.IPv6DualStack) && len(podCIDRs) > 1 {
klog.V(2).Infof("This node has multiple pod cidrs assigned and dual stack is not enabled. ignoring all except first cidr")
klog.V(2).InfoS("This node has multiple pod cidrs assigned and dual stack is not enabled. ignoring all except first cidr")
podCIDRs = podCIDRs[0:1]
}
for idx, currentPodCIDR := range podCIDRs {
_, cidr, err := net.ParseCIDR(currentPodCIDR)
if nil != err {
klog.Warningf("Failed to generate CNI network config with cidr %s at index:%v: %v", currentPodCIDR, idx, err)
klog.InfoS("Failed to generate CNI network config with cidr at the index", "podCIDR", currentPodCIDR, "index", idx, "err", err)
return
}
// create list of ips
@ -272,10 +272,10 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf
setHairpin := plugin.hairpinMode == kubeletconfig.HairpinVeth
json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, plugin.getRangesConfig(), plugin.getRoutesConfig())
klog.V(4).Infof("CNI network config set to %v", json)
klog.V(4).InfoS("CNI network config set to json format", "cniNetworkConfig", json)
plugin.netConfig, err = libcni.ConfFromBytes([]byte(json))
if err != nil {
klog.Warningf("** failed to set up CNI with %v err:%v", json, err)
klog.InfoS("** failed to set up CNI with json format", "cniNetworkConfig", json, "err", err)
// just incase it was set by mistake
plugin.netConfig = nil
// we bail out by clearing the *entire* list
@ -302,13 +302,13 @@ func (plugin *kubenetNetworkPlugin) clearUnusedBridgeAddresses() {
addrs, err := netlink.AddrList(bridge, unix.AF_INET)
if err != nil {
klog.V(2).Infof("attempting to get address for interface: %s failed with err:%v", BridgeName, err)
klog.V(2).InfoS("Attempting to get address for the interface failed", "interfaceName", BridgeName, "err", err)
return
}
for _, addr := range addrs {
if !cidrIncluded(plugin.podCIDRs, addr.IPNet) {
klog.V(2).Infof("Removing old address %s from %s", addr.IPNet.String(), BridgeName)
klog.V(2).InfoS("Removing old address from the interface", "interfaceName", BridgeName, "address", addr.IPNet.String())
netlink.AddrDel(bridge, &addr)
}
}
@ -330,7 +330,7 @@ func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kube
// Disable DAD so we skip the kernel delay on bringing up new interfaces.
if err := plugin.disableContainerDAD(id); err != nil {
klog.V(3).Infof("Failed to disable DAD in container: %v", err)
klog.V(3).InfoS("Failed to disable DAD in container", "err", err)
}
// Bring up container loopback interface
@ -476,20 +476,20 @@ func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id k
}
defer func() {
klog.V(4).Infof("SetUpPod took %v for %s/%s", time.Since(start), namespace, name)
klog.V(4).InfoS("SetUpPod took time", "pod", klog.KRef(namespace, name), "duration", time.Since(start))
}()
if err := plugin.setup(namespace, name, id, annotations); err != nil {
if err := plugin.teardown(namespace, name, id); err != nil {
// Not a hard error or warning
klog.V(4).Infof("Failed to clean up %s/%s after SetUpPod failure: %v", namespace, name, err)
klog.V(4).InfoS("Failed to clean up pod after SetUpPod failure", "pod", klog.KRef(namespace, name), "err", err)
}
return err
}
// Need to SNAT outbound traffic from cluster
if err := plugin.ensureMasqRule(); err != nil {
klog.Errorf("Failed to ensure MASQ rule: %v", err)
klog.ErrorS(err, "Failed to ensure MASQ rule")
}
return nil
@ -502,21 +502,21 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k
// Loopback network deletion failure should not be fatal on teardown
if err := plugin.delContainerFromNetwork(plugin.loConfig, "lo", namespace, name, id); err != nil {
klog.Warningf("Failed to delete loopback network: %v", err)
klog.InfoS("Failed to delete loopback network", "err", err)
errList = append(errList, err)
}
// no ip dependent actions
if err := plugin.delContainerFromNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id); err != nil {
klog.Warningf("Failed to delete %q network: %v", network.DefaultInterfaceName, err)
klog.InfoS("Failed to delete the interface network", "interfaceName", network.DefaultInterfaceName, "err", err)
errList = append(errList, err)
}
// If there are no IPs registered we can't teardown pod's IP dependencies
iplist, exists := plugin.getCachedPodIPs(id)
if !exists || len(iplist) == 0 {
klog.V(5).Infof("container %s (%s/%s) does not have recorded. ignoring teardown call", id, name, namespace)
klog.V(5).InfoS("Container does not have IP registered. Ignoring teardown call", "containerID", id, "pod", klog.KRef(namespace, name))
return nil
}
@ -529,7 +529,7 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k
// process each pod IP
for _, ip := range iplist {
isV6 := netutils.IsIPv6String(ip)
klog.V(5).Infof("Removing pod port mappings from IP %s", ip)
klog.V(5).InfoS("Removing pod port mappings from the IP", "IP", ip)
if portMappings != nil && len(portMappings) > 0 {
if isV6 {
if err = plugin.hostportManagerv6.Remove(id.ID, &hostport.PodPortMapping{
@ -552,7 +552,7 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k
}
}
klog.V(5).Infof("Removing pod IP %s from shaper for (%s/%s)", ip, name, namespace)
klog.V(5).InfoS("Removing pod IP from shaper for the pod", "pod", klog.KRef(namespace, name), "IP", ip)
// shaper uses a cidr, but we are using a single IP.
mask := "32"
if isV6 {
@ -561,7 +561,7 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k
if err := plugin.shaper().Reset(fmt.Sprintf("%s/%s", ip, mask)); err != nil {
// Possible bandwidth shaping wasn't enabled for this pod anyways
klog.V(4).Infof("Failed to remove pod IP %s from shaper: %v", ip, err)
klog.V(4).InfoS("Failed to remove pod IP from shaper", "IP", ip, "err", err)
}
plugin.removePodIP(id, ip)
@ -572,7 +572,7 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k
func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error {
start := time.Now()
defer func() {
klog.V(4).Infof("TearDownPod took %v for %s/%s", time.Since(start), namespace, name)
klog.V(4).InfoS("TearDownPod took time", "pod", klog.KRef(namespace, name), "duration", time.Since(start))
}()
if plugin.netConfig == nil {
@ -585,7 +585,7 @@ func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, i
// Need to SNAT outbound traffic from cluster
if err := plugin.ensureMasqRule(); err != nil {
klog.Errorf("Failed to ensure MASQ rule: %v", err)
klog.ErrorS(err, "Failed to ensure MASQ rule")
}
return nil
}
@ -690,7 +690,7 @@ func (plugin *kubenetNetworkPlugin) checkRequiredCNIPluginsInOneDir(dir string)
func (plugin *kubenetNetworkPlugin) buildCNIRuntimeConf(ifName string, id kubecontainer.ContainerID, needNetNs bool) (*libcni.RuntimeConf, error) {
netnsPath, err := plugin.host.GetNetNS(id.ID)
if needNetNs && err != nil {
klog.Errorf("Kubenet failed to retrieve network namespace path: %v", err)
klog.ErrorS(err, "Kubenet failed to retrieve network namespace path")
}
return &libcni.RuntimeConf{
@ -707,7 +707,7 @@ func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.Network
return nil, fmt.Errorf("error building CNI config: %v", err)
}
klog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt)
klog.V(3).InfoS("Adding pod to network with CNI plugin and runtime", "pod", klog.KRef(namespace, name), "networkName", config.Network.Name, "networkType", config.Network.Type, "rt", rt)
// Because the default remote runtime request timeout is 4 min,so set slightly less than 240 seconds
// Todo get the timeout from parent ctx
cniTimeoutCtx, cancelFunc := context.WithTimeout(context.Background(), network.CNITimeoutSec*time.Second)
@ -725,7 +725,7 @@ func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.Netwo
return fmt.Errorf("error building CNI config: %v", err)
}
klog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt)
klog.V(3).InfoS("Removing pod from network with CNI plugin and runtime", "pod", klog.KRef(namespace, name), "networkName", config.Network.Name, "networkType", config.Network.Type, "rt", rt)
// Because the default remote runtime request timeout is 4 min,so set slightly less than 240 seconds
// Todo get the timeout from parent ctx
cniTimeoutCtx, cancelFunc := context.WithTimeout(context.Background(), network.CNITimeoutSec*time.Second)
@ -755,34 +755,34 @@ func (plugin *kubenetNetworkPlugin) shaper() bandwidth.Shaper {
func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareAddr, podCIDRs []net.IPNet, podGateways []net.IP) {
if plugin.ebtables == nil {
plugin.ebtables = utilebtables.New(plugin.execer)
klog.V(3).Infof("Flushing dedup chain")
klog.V(3).InfoS("Flushing dedup chain")
if err := plugin.ebtables.FlushChain(utilebtables.TableFilter, dedupChain); err != nil {
klog.Errorf("Failed to flush dedup chain: %v", err)
klog.ErrorS(err, "Failed to flush dedup chain")
}
}
_, err := plugin.ebtables.GetVersion()
if err != nil {
klog.Warningf("Failed to get ebtables version. Skip syncing ebtables dedup rules: %v", err)
klog.InfoS("Failed to get ebtables version. Skip syncing ebtables dedup rules", "err", err)
return
}
// ensure custom chain exists
_, err = plugin.ebtables.EnsureChain(utilebtables.TableFilter, dedupChain)
if err != nil {
klog.Errorf("Failed to ensure %v chain %v", utilebtables.TableFilter, dedupChain)
klog.ErrorS(nil, "Failed to ensure filter table KUBE-DEDUP chain")
return
}
// jump to custom chain to the chain from core tables
_, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, utilebtables.ChainOutput, "-j", string(dedupChain))
if err != nil {
klog.Errorf("Failed to ensure %v chain %v jump to %v chain: %v", utilebtables.TableFilter, utilebtables.ChainOutput, dedupChain, err)
klog.ErrorS(err, "Failed to ensure filter table OUTPUT chain jump to KUBE-DEDUP chain")
return
}
// per gateway rule
for idx, gw := range podGateways {
klog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), gw.String(), podCIDRs[idx].String())
klog.V(3).InfoS("Filtering packets with ebtables", "mac", macAddr.String(), "gateway", gw.String(), "podCIDR", podCIDRs[idx].String())
bIsV6 := netutils.IsIPv6(gw)
IPFamily := "IPv4"
@ -794,13 +794,13 @@ func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareA
commonArgs := []string{"-p", IPFamily, "-s", macAddr.String(), "-o", "veth+"}
_, err = plugin.ebtables.EnsureRule(utilebtables.Prepend, utilebtables.TableFilter, dedupChain, append(commonArgs, ipSrc, gw.String(), "-j", "ACCEPT")...)
if err != nil {
klog.Errorf("Failed to ensure packets from cbr0 gateway:%v to be accepted with error:%v", gw.String(), err)
klog.ErrorS(err, "Failed to ensure packets from cbr0 gateway to be accepted with error", "gateway", gw.String())
return
}
_, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, dedupChain, append(commonArgs, ipSrc, podCIDRs[idx].String(), "-j", "DROP")...)
if err != nil {
klog.Errorf("Failed to ensure packets from podCidr[%v] but has mac address of cbr0 to get dropped. err:%v", podCIDRs[idx].String(), err)
klog.ErrorS(err, "Failed to ensure packets from podCidr but has mac address of cbr0 to get dropped.", "podCIDR", podCIDRs[idx].String())
return
}
}
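The SetUpPod/TearDownPod timing logs above pass the elapsed time as a time.Duration value rather than pre-formatting it into the message, which lets structured backends keep the type and unit. A sketch of that pattern, with placeholder work:

package main

import (
	"time"

	"k8s.io/klog/v2"
)

// setUpPodSketch mirrors the deferred timing log in SetUpPod above.
func setUpPodSketch(namespace, name string) {
	start := time.Now()
	defer func() {
		// The duration is a typed value, not a formatted string.
		klog.V(4).InfoS("SetUpPod took time",
			"pod", klog.KRef(namespace, name),
			"duration", time.Since(start))
	}()
	time.Sleep(10 * time.Millisecond) // stand-in for real setup work
}

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()
	setUpPodSketch("default", "nginx")
}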


@ -165,7 +165,7 @@ func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host H
if err != nil {
allErrs = append(allErrs, fmt.Errorf("network plugin %q failed init: %v", networkPluginName, err))
} else {
klog.V(1).Infof("Loaded network plugin %q", networkPluginName)
klog.V(1).InfoS("Loaded network plugin", "networkPluginName", networkPluginName)
}
} else {
allErrs = append(allErrs, fmt.Errorf("network plugin %q not found", networkPluginName))
@ -192,12 +192,12 @@ func (plugin *NoopNetworkPlugin) Init(host Host, hairpinMode kubeletconfig.Hairp
// it was built-in.
utilexec.New().Command("modprobe", "br-netfilter").CombinedOutput()
if err := plugin.Sysctl.SetSysctl(sysctlBridgeCallIPTables, 1); err != nil {
klog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err)
klog.InfoS("can't set sysctl bridge-nf-call-iptables", "err", err)
}
if val, err := plugin.Sysctl.GetSysctl(sysctlBridgeCallIP6Tables); err == nil {
if val != 1 {
if err = plugin.Sysctl.SetSysctl(sysctlBridgeCallIP6Tables, 1); err != nil {
klog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIP6Tables, err)
klog.InfoS("can't set sysctl bridge-nf-call-ip6tables", "err", err)
}
}
}
@ -365,12 +365,12 @@ func (pm *PluginManager) podUnlock(fullPodName string) {
lock, ok := pm.pods[fullPodName]
if !ok {
klog.Warningf("Unbalanced pod lock unref for %s", fullPodName)
klog.InfoS("Unbalanced pod lock unref for the pod", "podFullName", fullPodName)
return
} else if lock.refcount == 0 {
// This should never ever happen, but handle it anyway
delete(pm.pods, fullPodName)
klog.Warningf("Pod lock for %s still in map with zero refcount", fullPodName)
klog.InfoS("Pod lock for the pod still in map with zero refcount", "podFullName", fullPodName)
return
}
lock.refcount--
@ -414,7 +414,7 @@ func (pm *PluginManager) SetUpPod(podNamespace, podName string, id kubecontainer
pm.podLock(fullPodName).Lock()
defer pm.podUnlock(fullPodName)
klog.V(3).Infof("Calling network plugin %s to set up pod %q", pm.plugin.Name(), fullPodName)
klog.V(3).InfoS("Calling network plugin to set up the pod", "pod", klog.KRef(podNamespace, podName), "networkPluginName", pm.plugin.Name())
if err := pm.plugin.SetUpPod(podNamespace, podName, id, annotations, options); err != nil {
recordError(operation)
return fmt.Errorf("networkPlugin %s failed to set up pod %q network: %v", pm.plugin.Name(), fullPodName, err)
@ -430,7 +430,7 @@ func (pm *PluginManager) TearDownPod(podNamespace, podName string, id kubecontai
pm.podLock(fullPodName).Lock()
defer pm.podUnlock(fullPodName)
klog.V(3).Infof("Calling network plugin %s to tear down pod %q", pm.plugin.Name(), fullPodName)
klog.V(3).InfoS("Calling network plugin to tear down the pod", "pod", klog.KRef(podNamespace, podName), "networkPluginName", pm.plugin.Name())
if err := pm.plugin.TearDownPod(podNamespace, podName, id); err != nil {
recordError(operation)
return fmt.Errorf("networkPlugin %s failed to teardown pod %q network: %v", pm.plugin.Name(), fullPodName, err)


@ -20,6 +20,7 @@ package remote
import (
"fmt"
"os"
"google.golang.org/grpc"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
@ -54,11 +55,11 @@ func NewDockerServer(endpoint string, s dockershim.CRIService) *DockerServer {
func (s *DockerServer) Start() error {
// Start the internal service.
if err := s.service.Start(); err != nil {
klog.Errorf("Unable to start docker service")
klog.ErrorS(err, "Unable to start docker service")
return err
}
klog.V(2).Infof("Start dockershim grpc server")
klog.V(2).InfoS("Start dockershim grpc server")
l, err := util.CreateListener(s.endpoint)
if err != nil {
return fmt.Errorf("failed to listen on %q: %v", s.endpoint, err)
@ -72,7 +73,8 @@ func (s *DockerServer) Start() error {
runtimeapi.RegisterImageServiceServer(s.server, s.service)
go func() {
if err := s.server.Serve(l); err != nil {
klog.Fatalf("Failed to serve connections: %v", err)
klog.ErrorS(err, "Failed to serve connections")
os.Exit(1)
}
}()
return nil
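
Note the Fatalf replacement above: klog.Fatalf would emit an unstructured line and exit immediately, so the migration logs through ErrorS and exits explicitly. A minimal sketch of the same shape, assuming a stand-in serve function in place of s.server.Serve(l):

package main

import (
	"errors"
	"os"

	"k8s.io/klog/v2"
)

// serve stands in for s.server.Serve(l); here it always fails.
func serve() error { return errors.New("listener closed") }

func main() {
	klog.InitFlags(nil)

	if err := serve(); err != nil {
		// ErrorS keeps the exit path on the structured format; Flush makes
		// sure the line is written before the process terminates.
		klog.ErrorS(err, "Failed to serve connections")
		klog.Flush()
		os.Exit(1)
	}
}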

View File

@ -58,10 +58,9 @@ func runDockershim(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}
// The unix socket for kubelet <-> dockershim communication, dockershim start before runtime service init.
klog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q",
remoteRuntimeEndpoint,
remoteImageEndpoint)
klog.V(2).Infof("Starting the GRPC server for the docker CRI shim.")
klog.V(5).InfoS("Using remote runtime endpoint and image endpoint", "runtimeEndpoint", remoteRuntimeEndpoint, "imageEndpoint", remoteImageEndpoint)
klog.V(2).InfoS("Starting the GRPC server for the docker CRI shim.")
dockerServer := dockerremote.NewDockerServer(remoteRuntimeEndpoint, ds)
if err := dockerServer.Start(); err != nil {
return err
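
The hunk above also consolidates verbosity: the endpoint details stay at V(5) on a single structured line, while the lifecycle message remains at V(2). A sketch of that split under assumed sample values (the socket path is illustrative):

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse() // pass -v=5 to see both lines
	defer klog.Flush()

	runtimeEndpoint := "unix:///var/run/dockershim.sock" // sample value
	imageEndpoint := runtimeEndpoint
	// Debug detail and lifecycle events keep their old verbosity levels;
	// only the formatting changes to key-value pairs.
	klog.V(5).InfoS("Using remote runtime endpoint and image endpoint",
		"runtimeEndpoint", runtimeEndpoint, "imageEndpoint", imageEndpoint)
	klog.V(2).InfoS("Starting the GRPC server for the docker CRI shim")
}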

View File

@ -73,8 +73,7 @@ func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) {
// But it is better to be on the safe side to still return true here.
return true, fmt.Errorf("failed to update pod CIDR: %v", err)
}
klog.Infof("Setting Pod CIDR: %v -> %v", podCIDR, cidr)
klog.InfoS("Updating Pod CIDR", "originalPodCIDR", podCIDR, "newPodCIDR", cidr)
kl.runtimeState.setPodCIDR(cidr)
return true, nil
}
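
Value transitions such as the CIDR change above get one key per value instead of an "old -> new" format string, which keeps each value independently queryable. A sketch with sample CIDRs:

package main

import "k8s.io/klog/v2"

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	podCIDR, cidr := "10.244.0.0/24", "10.244.1.0/24" // sample values
	// One key per value, rather than fmt-style "%v -> %v" in the message.
	klog.InfoS("Updating Pod CIDR", "originalPodCIDR", podCIDR, "newPodCIDR", cidr)
}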

View File

@ -53,7 +53,7 @@ func (kl *Kubelet) initNetworkUtil() {
for i := range iptClients {
iptClient := iptClients[i]
if kl.syncNetworkUtil(iptClient) {
klog.Infof("Initialized %s iptables rules.", protocols[i])
klog.InfoS("Initialized protocol iptables rules.", "protocol", protocols[i])
go iptClient.Monitor(
utiliptables.Chain("KUBE-KUBELET-CANARY"),
[]utiliptables.Table{utiliptables.TableMangle, utiliptables.TableNAT, utiliptables.TableFilter},
@ -61,7 +61,7 @@ func (kl *Kubelet) initNetworkUtil() {
1*time.Minute, wait.NeverStop,
)
} else {
klog.Warningf("Failed to initialize %s iptables rules; some functionality may be missing.", protocols[i])
klog.InfoS("Failed to initialize protocol iptables rules; some functionality may be missing.", "protocol", protocols[i])
}
}
}
@ -76,22 +76,22 @@ func (kl *Kubelet) syncNetworkUtil(iptClient utiliptables.Interface) bool {
// Setup KUBE-MARK-DROP rules
dropMark := getIPTablesMark(kl.iptablesDropBit)
if _, err := iptClient.EnsureChain(utiliptables.TableNAT, KubeMarkDropChain); err != nil {
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkDropChain, err)
klog.ErrorS(err, "Failed to ensure that nat chain exists KUBE-MARK-DROP chain")
return false
}
if _, err := iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkDropChain, "-j", "MARK", "--or-mark", dropMark); err != nil {
klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkDropChain, err)
klog.ErrorS(err, "Failed to ensure marking rule for KUBE-MARK-DROP chain")
return false
}
if _, err := iptClient.EnsureChain(utiliptables.TableFilter, KubeFirewallChain); err != nil {
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, KubeFirewallChain, err)
klog.ErrorS(err, "Failed to ensure that filter table exists KUBE-FIREWALL chain")
return false
}
if _, err := iptClient.EnsureRule(utiliptables.Append, utiliptables.TableFilter, KubeFirewallChain,
"-m", "comment", "--comment", "kubernetes firewall for dropping marked packets",
"-m", "mark", "--mark", fmt.Sprintf("%s/%s", dropMark, dropMark),
"-j", "DROP"); err != nil {
klog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err)
klog.ErrorS(err, "Failed to ensure rule to drop packet marked by the KUBE-MARK-DROP in KUBE-FIREWALL chain")
return false
}
@ -105,37 +105,37 @@ func (kl *Kubelet) syncNetworkUtil(iptClient utiliptables.Interface) bool {
"-m", "conntrack",
"!", "--ctstate", "RELATED,ESTABLISHED,DNAT",
"-j", "DROP"); err != nil {
klog.Errorf("Failed to ensure rule to drop invalid localhost packets in %v chain %v: %v", utiliptables.TableFilter, KubeFirewallChain, err)
klog.ErrorS(err, "Failed to ensure rule to drop invalid localhost packets in filter table KUBE-FIREWALL chain")
return false
}
}
if _, err := iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainOutput, "-j", string(KubeFirewallChain)); err != nil {
klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainOutput, KubeFirewallChain, err)
klog.ErrorS(err, "Failed to ensure that filter table from OUTPUT chain jumps to KUBE-FIREWALL chain")
return false
}
if _, err := iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainInput, "-j", string(KubeFirewallChain)); err != nil {
klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainInput, KubeFirewallChain, err)
klog.ErrorS(err, "Failed to ensure that filter table INPUT chain jumps to KUBE-FIREWALL chain")
return false
}
// Setup KUBE-MARK-MASQ rules
masqueradeMark := getIPTablesMark(kl.iptablesMasqueradeBit)
if _, err := iptClient.EnsureChain(utiliptables.TableNAT, KubeMarkMasqChain); err != nil {
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkMasqChain, err)
klog.ErrorS(err, "Failed to ensure that nat table exists KUBE-MARK-MASQ chain")
return false
}
if _, err := iptClient.EnsureChain(utiliptables.TableNAT, KubePostroutingChain); err != nil {
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubePostroutingChain, err)
klog.ErrorS(err, "Failed to ensure that nat table exists kube POSTROUTING chain")
return false
}
if _, err := iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkMasqChain, "-j", "MARK", "--or-mark", masqueradeMark); err != nil {
klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkMasqChain, err)
klog.ErrorS(err, "Failed to ensure marking rule for KUBE-MARK-MASQ chain")
return false
}
if _, err := iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, utiliptables.ChainPostrouting,
"-m", "comment", "--comment", "kubernetes postrouting rules", "-j", string(KubePostroutingChain)); err != nil {
klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, KubePostroutingChain, err)
klog.ErrorS(err, "Failed to ensure that nat table from POSTROUTING chain jumps to KUBE-POSTROUTING chain")
return false
}
@ -145,7 +145,7 @@ func (kl *Kubelet) syncNetworkUtil(iptClient utiliptables.Interface) bool {
if _, err := iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain,
"-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", masqueradeMark, masqueradeMark),
"-j", "RETURN"); err != nil {
klog.Errorf("Failed to ensure filtering rule for %v: %v", KubePostroutingChain, err)
klog.ErrorS(err, "Failed to ensure filtering rule for KUBE-POSTROUTING chain")
return false
}
// Clear the mark to avoid re-masquerading if the packet re-traverses the network stack.
@ -153,7 +153,7 @@ func (kl *Kubelet) syncNetworkUtil(iptClient utiliptables.Interface) bool {
// to Sprintf another bitmask).
if _, err := iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain,
"-j", "MARK", "--xor-mark", masqueradeMark); err != nil {
klog.Errorf("Failed to ensure unmarking rule for %v: %v", KubePostroutingChain, err)
klog.ErrorS(err, "Failed to ensure unmarking rule for KUBE-POSTROUTING chain")
return false
}
masqRule := []string{
@ -164,7 +164,7 @@ func (kl *Kubelet) syncNetworkUtil(iptClient utiliptables.Interface) bool {
masqRule = append(masqRule, "--random-fully")
}
if _, err := iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain, masqRule...); err != nil {
klog.Errorf("Failed to ensure SNAT rule for packets marked by %v in %v chain %v: %v", KubeMarkMasqChain, utiliptables.TableNAT, KubePostroutingChain, err)
klog.ErrorS(err, "Failed to ensure SNAT rule for packets marked by KUBE-MARK-MASQ chain in nat table KUBE-POSTROUTING chain")
return false
}
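
A recurring choice in the hunk above: table and chain names are compile-time constants, so the migration folds them into the static message instead of emitting them as key-value pairs; only the error remains variable. A sketch of that rule, with a stand-in error:

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	err := errors.New("exit status 1") // stand-in for an iptables failure
	// Constant identifiers (table and chain names) live in the message;
	// only genuinely variable data becomes a key-value pair.
	klog.ErrorS(err, "Failed to ensure marking rule for KUBE-MARK-MASQ chain")
}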

View File

@ -123,7 +123,7 @@ func (c *Configurer) formDNSSearchFitsLimits(composedSearch []string, pod *v1.Po
if limitsExceeded {
log := fmt.Sprintf("Search Line limits were exceeded, some search paths have been omitted, the applied search line is: %s", strings.Join(composedSearch, " "))
c.recorder.Event(pod, v1.EventTypeWarning, "DNSConfigForming", log)
klog.Error(log)
klog.ErrorS(nil, "eventlog", log)
}
return composedSearch
}
@ -133,7 +133,7 @@ func (c *Configurer) formDNSNameserversFitsLimits(nameservers []string, pod *v1.
nameservers = nameservers[0:validation.MaxDNSNameservers]
log := fmt.Sprintf("Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: %s", strings.Join(nameservers, " "))
c.recorder.Event(pod, v1.EventTypeWarning, "DNSConfigForming", log)
klog.Error(log)
klog.ErrorS(nil, "eventlog", log)
}
return nameservers
}
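
As the two hunks above show, klog.ErrorS accepts a nil error: the line still goes out at error severity, which suits limit violations that have no underlying error object. A sketch under that assumption; the nameserver list is fabricated for illustration:

package main

import "k8s.io/klog/v2"

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	// With a nil error the severity stays at error level, but no "err"
	// field is emitted alongside the key-value pairs.
	klog.ErrorS(nil, "Nameserver limits were exceeded, some nameservers have been omitted",
		"appliedNameservers", []string{"10.0.0.10", "10.0.0.11", "10.0.0.12"})
}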
@ -161,7 +161,7 @@ func (c *Configurer) CheckLimitsForResolvConf() {
f, err := os.Open(c.ResolverConfig)
if err != nil {
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error())
klog.V(4).Infof("Check limits for resolv.conf failed at file open: %v", err)
klog.V(4).InfoS("Check limits for resolv.conf failed at file open", "err", err)
return
}
defer f.Close()
@ -169,7 +169,7 @@ func (c *Configurer) CheckLimitsForResolvConf() {
_, hostSearch, _, err := parseResolvConf(f)
if err != nil {
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error())
klog.V(4).Infof("Check limits for resolv.conf failed at parse resolv.conf: %v", err)
klog.V(4).InfoS("Check limits for resolv.conf failed at parse resolv.conf", "err", err)
return
}
@ -182,14 +182,14 @@ func (c *Configurer) CheckLimitsForResolvConf() {
if len(hostSearch) > domainCountLimit {
log := fmt.Sprintf("Resolv.conf file '%s' contains search line consisting of more than %d domains!", c.ResolverConfig, domainCountLimit)
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
klog.V(4).Infof("Check limits for resolv.conf failed: %s", log)
klog.V(4).InfoS("Check limits for resolv.conf failed", "eventlog", log)
return
}
if len(strings.Join(hostSearch, " ")) > validation.MaxDNSSearchListChars {
log := fmt.Sprintf("Resolv.conf file '%s' contains search line which length is more than allowed %d chars!", c.ResolverConfig, validation.MaxDNSSearchListChars)
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
klog.V(4).Infof("Check limits for resolv.conf failed: %s", log)
klog.V(4).InfoS("Check limits for resolv.conf failed", "eventlog", log)
return
}
}
@ -337,7 +337,7 @@ func (c *Configurer) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
dnsType, err := getPodDNSType(pod)
if err != nil {
klog.Errorf("Failed to get DNS type for pod %q: %v. Falling back to DNSClusterFirst policy.", format.Pod(pod), err)
klog.ErrorS(err, "Failed to get DNS type for pod. Falling back to DNSClusterFirst policy.", "pod", klog.KObj(pod))
dnsType = podDNSCluster
}
switch dnsType {
@ -404,12 +404,12 @@ func (c *Configurer) SetupDNSinContainerizedMounter(mounterPath string) {
if c.ResolverConfig != "" {
f, err := os.Open(c.ResolverConfig)
if err != nil {
klog.Error("Could not open resolverConf file")
klog.ErrorS(err, "Could not open resolverConf file")
} else {
defer f.Close()
_, hostSearch, _, err := parseResolvConf(f)
if err != nil {
klog.Errorf("Error for parsing the resolv.conf file: %v", err)
klog.ErrorS(err, "Error for parsing the resolv.conf file")
} else {
dnsString = dnsString + "search"
for _, search := range hostSearch {
@ -420,6 +420,6 @@ func (c *Configurer) SetupDNSinContainerizedMounter(mounterPath string) {
}
}
if err := ioutil.WriteFile(resolvePath, []byte(dnsString), 0600); err != nil {
klog.Errorf("Could not write dns nameserver in file %s, with error %v", resolvePath, err)
klog.ErrorS(err, "Could not write dns nameserver in the file", "path", resolvePath)
}
}