From 2bd91dda64b857ed2f45542a7aae42f855e931d1 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Fri, 29 Jun 2018 15:38:52 -0400
Subject: [PATCH] kubernetes: fix printf format errors

These are all flagged by Go 1.11's more accurate printf checking in
go vet, which runs as part of go test.

Lubomir I. Ivanov applied an amendment for:
pkg/cloudprovider/providers/vsphere/nodemanager.go
---
 cmd/kubeadm/app/phases/kubelet/flags.go | 2 +-
 pkg/cloudprovider/providers/aws/aws.go | 2 +-
 pkg/cloudprovider/providers/aws/volumes.go | 2 +-
 pkg/cloudprovider/providers/azure/azure_client.go | 8 ++++----
 pkg/cloudprovider/providers/azure/azure_loadbalancer.go | 4 ++--
 pkg/cloudprovider/providers/azure/azure_vmss.go | 4 ++--
 pkg/cloudprovider/providers/azure/azure_vmss_cache.go | 2 +-
 pkg/cloudprovider/providers/gce/gce_address_manager.go | 2 +-
 pkg/cloudprovider/providers/vsphere/nodemanager.go | 8 ++++----
 .../providers/vsphere/vclib/diskmanagers/vmdm.go | 6 +++---
 pkg/cloudprovider/providers/vsphere/vsphere.go | 6 +++---
 pkg/cloudprovider/providers/vsphere/vsphere_util.go | 2 +-
 pkg/controller/garbagecollector/garbagecollector.go | 2 +-
 .../volume/attachdetach/attach_detach_controller.go | 2 +-
 pkg/controller/volume/attachdetach/util/util.go | 2 +-
 .../volume/pvprotection/pv_protection_controller.go | 2 +-
 pkg/kubelet/kubelet_pods.go | 2 +-
 .../populator/desired_state_of_world_populator.go | 2 +-
 pkg/proxy/ipvs/proxier.go | 2 +-
 pkg/util/mount/mount_linux.go | 2 +-
 pkg/volume/cinder/attacher_test.go | 2 +-
 pkg/volume/fc/fc_util.go | 2 +-
 pkg/volume/iscsi/iscsi_util.go | 2 +-
 pkg/volume/portworx/portworx_util.go | 2 +-
 .../storage/storageclass/setdefault/admission.go | 2 +-
 staging/src/k8s.io/apiserver/pkg/storage/errors.go | 2 +-
 26 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/cmd/kubeadm/app/phases/kubelet/flags.go b/cmd/kubeadm/app/phases/kubelet/flags.go
index 252e6be84c2..3e06bc2fcd4 100644
--- a/cmd/kubeadm/app/phases/kubelet/flags.go
+++ b/cmd/kubeadm/app/phases/kubelet/flags.go
@@ -97,7 +97,7 @@ func buildKubeletArgMap(opts kubeletFlagsOpts) map[string]string {
 	// Make sure the node name we're passed will work with Kubelet
 	if opts.nodeRegOpts.Name != "" && opts.nodeRegOpts.Name != opts.defaultHostname {
-		glog.V(1).Info("setting kubelet hostname-override to %q", opts.nodeRegOpts.Name)
+		glog.V(1).Infof("setting kubelet hostname-override to %q", opts.nodeRegOpts.Name)
 		kubeletFlags["hostname-override"] = opts.nodeRegOpts.Name
 	}
diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go
index a790c088864..63236cf9d84 100644
--- a/pkg/cloudprovider/providers/aws/aws.go
+++ b/pkg/cloudprovider/providers/aws/aws.go
@@ -2358,7 +2358,7 @@ func (c *Cloud) GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]strin
 	labels := make(map[string]string)
 	az := aws.StringValue(info.AvailabilityZone)
 	if az == "" {
-		return nil, fmt.Errorf("volume did not have AZ information: %q", info.VolumeId)
+		return nil, fmt.Errorf("volume did not have AZ information: %q", aws.StringValue(info.VolumeId))
 	}
 	labels[kubeletapis.LabelZoneFailureDomain] = az
diff --git a/pkg/cloudprovider/providers/aws/volumes.go b/pkg/cloudprovider/providers/aws/volumes.go
index 48aaded947c..cb3a2aa275a 100644
--- a/pkg/cloudprovider/providers/aws/volumes.go
+++ b/pkg/cloudprovider/providers/aws/volumes.go
@@ -119,7 +119,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type
 	info, err := disk.describeVolume()
 	if err != nil {
-		glog.Warning("Error describing volume %s with %v", diskName, err)
+		glog.Warningf("Error describing volume %s with %v", diskName, err)
 		awsDiskInfo.volumeState = "unknown"
 		return awsDiskInfo, false, err
 	}
diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go
index 5641832ca9a..c37e631f77f 100644
--- a/pkg/cloudprovider/providers/azure/azure_client.go
+++ b/pkg/cloudprovider/providers/azure/azure_client.go
@@ -845,9 +845,9 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro
 		return
 	}
-	glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): start", resourceGroupName)
+	glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName)
 	defer func() {
-		glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): end", resourceGroupName)
+		glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName)
 	}()
 	mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID)
@@ -876,9 +876,9 @@ func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context,
 		return
 	}
-	glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
+	glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
 	defer func() {
-		glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
+		glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
 	}()
 	mc := newMetricContext("vmss", "update_instances", resourceGroupName, az.client.SubscriptionID)
diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
index fca68fbb404..74bebc6dd6f 100644
--- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
+++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
@@ -262,7 +262,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
 func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
 	isInternal := requiresInternalLoadBalancer(service)
 	serviceName := getServiceName(service)
-	glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%s) - start", serviceName, isInternal)
+	glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
 	vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
 	if err != nil {
 		glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
@@ -842,7 +842,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 // This entails adding required, missing SecurityRules and removing stale rules.
 func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) {
 	serviceName := getServiceName(service)
-	glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q lbName=%q", serviceName, clusterName)
+	glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName)
 	ports := service.Spec.Ports
 	if ports == nil {
diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go
index c7fa190b164..0cf6bf841a8 100644
--- a/pkg/cloudprovider/providers/azure/azure_vmss.go
+++ b/pkg/cloudprovider/providers/azure/azure_vmss.go
@@ -475,7 +475,7 @@ func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineSca
 			glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr)
 			return false, nil
 		}
-		glog.V(4).Info("backoff: success for scale set %q", name)
+		glog.V(4).Infof("backoff: success for scale set %q", name)
 		if cached != nil {
 			exists = true
@@ -845,7 +845,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd
 		ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID)
 		if err != nil {
-			glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it")
+			glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID)
 			continue
 		}
diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_cache.go b/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
index 5542b7ca0b8..b3c1b0e99e1 100644
--- a/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
+++ b/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
@@ -99,7 +99,7 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
 			for _, vm := range vms {
 				if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
-					glog.Warningf("failed to get computerName for vmssVM (%q)", vm.Name)
+					glog.Warningf("failed to get computerName for vmssVM (%q)", ssName)
 					continue
 				}
diff --git a/pkg/cloudprovider/providers/gce/gce_address_manager.go b/pkg/cloudprovider/providers/gce/gce_address_manager.go
index 4f642a92ccf..449b33a0d21 100644
--- a/pkg/cloudprovider/providers/gce/gce_address_manager.go
+++ b/pkg/cloudprovider/providers/gce/gce_address_manager.go
@@ -169,7 +169,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) {
 	if am.isManagedAddress(addr) {
 		// The address with this name is checked at the beginning of 'HoldAddress()', but for some reason
 		// it was re-created by this point. May be possible that two controllers are running.
-		glog.Warning("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
+		glog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
 	} else {
 		// If the retrieved address is not named with the loadbalancer name, then the controller does not own it, but will allow use of it.
glog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description) diff --git a/pkg/cloudprovider/providers/vsphere/nodemanager.go b/pkg/cloudprovider/providers/vsphere/nodemanager.go index 81aa928e99f..56ff71727f2 100644 --- a/pkg/cloudprovider/providers/vsphere/nodemanager.go +++ b/pkg/cloudprovider/providers/vsphere/nodemanager.go @@ -176,13 +176,13 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { defer cancel() vm, err := res.datacenter.GetVMByUUID(ctx, nodeUUID) if err != nil { - glog.V(4).Infof("Error %q while looking for vm=%+v in vc=%s and datacenter=%s", - err, node.Name, vm, res.vc, res.datacenter.Name()) + glog.V(4).Infof("Error while looking for vm=%+v in vc=%s and datacenter=%s: %v", + vm, res.vc, res.datacenter.Name(), err) if err != vclib.ErrNoVMFound { setGlobalErr(err) } else { glog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s", - node.Name, res.vc, res.datacenter.Name(), err) + node.Name, res.vc, res.datacenter.Name()) } continue } @@ -309,7 +309,7 @@ func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) { if err != nil { return nil, err } - glog.V(4).Infof("Updated NodeInfo %q for node %q.", nodeInfo, nodeName) + glog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName) nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm, nodeInfo.vmUUID}) } return nodeDetails, nil diff --git a/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go b/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go index 1979d572525..551a54c6c2c 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go @@ -102,7 +102,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto dummyVM, err = datastore.Datacenter.GetVMByPath(ctx, vmdisk.vmOptions.VMFolder.InventoryPath+"/"+dummyVMFullName) if err != nil { // Create a dummy VM - glog.V(1).Info("Creating Dummy VM: %q", dummyVMFullName) + glog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName) dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName) if err != nil { glog.Errorf("Failed to create Dummy VM. err: %v", err) @@ -132,7 +132,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto fileAlreadyExist = isAlreadyExists(vmdisk.diskPath, err) if fileAlreadyExist { //Skip error and continue to detach the disk as the disk was already created on the datastore. - glog.V(vclib.LogLevel).Info("File: %v already exists", vmdisk.diskPath) + glog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath) } else { glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err) return "", err @@ -143,7 +143,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto if err != nil { if vclib.DiskNotFoundErrMsg == err.Error() && fileAlreadyExist { // Skip error if disk was already detached from the dummy VM but still present on the datastore. 
-			glog.V(vclib.LogLevel).Info("File: %v is already detached", vmdisk.diskPath)
+			glog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath)
 		} else {
 			glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err)
 			return "", err
diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go
index 62a9cfb2e88..b450787d669 100644
--- a/pkg/cloudprovider/providers/vsphere/vsphere.go
+++ b/pkg/cloudprovider/providers/vsphere/vsphere.go
@@ -846,7 +846,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN
 			if err == nil {
 				glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName))
 				diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
-				glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", convertToString(nodeName), diskUUID, err)
+				glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", diskUUID, err)
 			}
 		}
 	}
@@ -963,7 +963,7 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b
 				volPath,
 				vSphereInstance)
 		}
-		glog.V(4).Infof("DiskIsAttached result: %q and error: %q, for volume: %q", attached, err, volPath)
+		glog.V(4).Infof("DiskIsAttached result: %v and error: %q, for volume: %q", attached, err, volPath)
 		return attached, err
 	}
 	requestTime := time.Now()
@@ -1054,7 +1054,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
 		return nodesToRetry, nil
 	}
-	glog.V(4).Info("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes)
+	glog.V(4).Infof("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes)
 	// Create context
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/pkg/cloudprovider/providers/vsphere/vsphere_util.go
index bc575838e84..1a5724e1430 100644
--- a/pkg/cloudprovider/providers/vsphere/vsphere_util.go
+++ b/pkg/cloudprovider/providers/vsphere/vsphere_util.go
@@ -479,7 +479,7 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
 			return nodesToRetry, err
 		}
 		nodeUUID = strings.ToLower(nodeUUID)
-		glog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %s", nodeName, nodeUUID, vmMoMap)
+		glog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap)
 		vclib.VerifyVolumePathsForVM(vmMoMap[nodeUUID], nodeVolumes[nodeName], convertToString(nodeName), attached)
 	}
 	return nodesToRetry, nil
diff --git a/pkg/controller/garbagecollector/garbagecollector.go b/pkg/controller/garbagecollector/garbagecollector.go
index 644949eca8b..5f1fff3effc 100644
--- a/pkg/controller/garbagecollector/garbagecollector.go
+++ b/pkg/controller/garbagecollector/garbagecollector.go
@@ -460,7 +460,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 	switch {
 	case len(solid) != 0:
-		glog.V(2).Infof("object %s has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
+		glog.V(2).Infof("object %#v has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
 		if len(dangling) == 0 && len(waitingForDependentsDeletion) == 0 {
 			return nil
 		}
diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go
index 1847d2a2f39..91982b46d77 100644
--- a/pkg/controller/volume/attachdetach/attach_detach_controller.go
+++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go
@@ -505,7 +505,7 @@ func (adc *attachDetachController) processVolumesInUse(
 		err := adc.actualStateOfWorld.SetVolumeMountedByNode(attachedVolume.VolumeName, nodeName, mounted)
 		if err != nil {
 			glog.Warningf(
-				"SetVolumeMountedByNode(%q, %q, %q) returned an error: %v",
+				"SetVolumeMountedByNode(%q, %q, %v) returned an error: %v",
 				attachedVolume.VolumeName, nodeName, mounted, err)
 		}
 	}
diff --git a/pkg/controller/volume/attachdetach/util/util.go b/pkg/controller/volume/attachdetach/util/util.go
index 6f3dfd0a783..c17ab729906 100644
--- a/pkg/controller/volume/attachdetach/util/util.go
+++ b/pkg/controller/volume/attachdetach/util/util.go
@@ -68,7 +68,7 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister coreli
 		glog.V(10).Infof(
 			"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
-			volumeSpec.Name,
+			volumeSpec.Name(),
 			pvName,
 			podNamespace,
 			pvcSource.ClaimName,
diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller.go b/pkg/controller/volume/pvprotection/pv_protection_controller.go
index 2f70a76212b..7764c693827 100644
--- a/pkg/controller/volume/pvprotection/pv_protection_controller.go
+++ b/pkg/controller/volume/pvprotection/pv_protection_controller.go
@@ -163,7 +163,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
 	pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
 	_, err := c.client.CoreV1().PersistentVolumes().Update(pvClone)
 	if err != nil {
-		glog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name)
+		glog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err)
 		return err
 	}
 	glog.V(3).Infof("Added protection finalizer to PV %s", pv.Name)
diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index 0e9ea8682b7..9265f624141 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -277,7 +277,7 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M
 	case *mountMode == v1.MountPropagationNone:
 		return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
 	default:
-		return 0, fmt.Errorf("invalid MountPropagation mode: %q", mountMode)
+		return 0, fmt.Errorf("invalid MountPropagation mode: %q", *mountMode)
 	}
 }
diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
index e65fdc1b2f8..48f71621af0 100644
--- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
+++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
@@ -513,7 +513,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 		glog.V(5).Infof(
 			"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
-			volumeSpec.Name,
+			volumeSpec.Name(),
 			pvName,
 			podNamespace,
 			pvcSource.ClaimName,
diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go
index 10dd36f226f..128734d5f2f 100644
--- a/pkg/proxy/ipvs/proxier.go
+++ b/pkg/proxy/ipvs/proxier.go
@@ -581,7 +581,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset
 		err = ipset.DestroySet(set.name)
 		if err != nil {
 			if !utilipset.IsNotFoundError(err) {
-				glog.Errorf("Error removing ipset %s, error: %v", set, err)
+				glog.Errorf("Error removing ipset %s, error: %v", set.name, err)
 				encounteredError = true
 			}
 		}
diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go
index 5f8e3905211..33a3cb8e735 100644
--- a/pkg/util/mount/mount_linux.go
+++ b/pkg/util/mount/mount_linux.go
@@ -552,7 +552,7 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) {
 	}
 	if len(pttype) > 0 {
-		glog.V(4).Infof("Disk %s detected partition table type: %s", pttype)
+		glog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype)
 		// Returns a special non-empty string as filesystem type, then kubelet
 		// will not format it.
 		return "unknown data, probably partitions", nil
diff --git a/pkg/volume/cinder/attacher_test.go b/pkg/volume/cinder/attacher_test.go
index 6acb573a51b..00b1f574757 100644
--- a/pkg/volume/cinder/attacher_test.go
+++ b/pkg/volume/cinder/attacher_test.go
@@ -610,7 +610,7 @@ func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID
 		return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong instanceID")
 	}
-	glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
+	glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
 	return expected.isAttached, expected.instanceID, expected.ret
 }
diff --git a/pkg/volume/fc/fc_util.go b/pkg/volume/fc/fc_util.go
index 39082754102..a68925a5ec4 100644
--- a/pkg/volume/fc/fc_util.go
+++ b/pkg/volume/fc/fc_util.go
@@ -365,7 +365,7 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri
 		if err.Error() != volumepathhandler.ErrDeviceNotFound {
 			return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err)
 		}
-		glog.Warning("fc: loopback for destination path: %s not found", dstPath)
+		glog.Warningf("fc: loopback for destination path: %s not found", dstPath)
 	}
 	// Detach volume from kubelet node
diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go
index d649e351729..24d103cf3b9 100644
--- a/pkg/volume/iscsi/iscsi_util.go
+++ b/pkg/volume/iscsi/iscsi_util.go
@@ -516,7 +516,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string)
 		if err.Error() != volumepathhandler.ErrDeviceNotFound {
 			return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err)
 		}
-		glog.Warning("iscsi: loopback for device: %s not found", device)
+		glog.Warningf("iscsi: loopback for device: %s not found", device)
 	}
 	// Detach a volume from kubelet node
 	err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found)
diff --git a/pkg/volume/portworx/portworx_util.go b/pkg/volume/portworx/portworx_util.go
index 1a2ae3f44dc..ab68e407268 100644
--- a/pkg/volume/portworx/portworx_util.go
+++ b/pkg/volume/portworx/portworx_util.go
@@ -211,7 +211,7 @@ func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource
 	newSizeInBytes := uint64(volutil.RoundUpToGiB(newSize) * volutil.GIB)
 	if vol.Spec.Size >= newSizeInBytes {
 		glog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+
-			"requested size: %d. Skipping resize.", vol.Spec.Size, newSizeInBytes)
+			"requested size: %d. Skipping resize.", spec.Name(), vol.Spec.Size, newSizeInBytes)
 		return nil
 	}
diff --git a/plugin/pkg/admission/storage/storageclass/setdefault/admission.go b/plugin/pkg/admission/storage/storageclass/setdefault/admission.go
index 70281518821..7b46e6b2c06 100644
--- a/plugin/pkg/admission/storage/storageclass/setdefault/admission.go
+++ b/plugin/pkg/admission/storage/storageclass/setdefault/admission.go
@@ -140,7 +140,7 @@ func getDefaultClass(lister storagelisters.StorageClassLister) (*storage.Storage
 		return nil, nil
 	}
 	if len(defaultClasses) > 1 {
-		glog.V(4).Infof("getDefaultClass %s defaults found", len(defaultClasses))
+		glog.V(4).Infof("getDefaultClass %d defaults found", len(defaultClasses))
 		return nil, errors.NewInternalError(fmt.Errorf("%d default StorageClasses were found", len(defaultClasses)))
 	}
 	return defaultClasses[0], nil
diff --git a/staging/src/k8s.io/apiserver/pkg/storage/errors.go b/staging/src/k8s.io/apiserver/pkg/storage/errors.go
index a4d134ac991..f73d47aedc8 100644
--- a/staging/src/k8s.io/apiserver/pkg/storage/errors.go
+++ b/staging/src/k8s.io/apiserver/pkg/storage/errors.go
@@ -166,5 +166,5 @@ func NewInternalError(reason string) InternalError {
 }

 func NewInternalErrorf(format string, a ...interface{}) InternalError {
-	return InternalError{fmt.Sprintf(format, a)}
+	return InternalError{fmt.Sprintf(format, a...)}
 }