Move from glog to klog

- Move from the old github.com/golang/glog to k8s.io/klog
- klog has an explicit InitFlags(), so we call it wherever the flags are needed (a sketch follows this list)
- Update the other vendored repositories that made the same glog-to-klog change:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Remove all remaining references to glog
- Fix some tests by calling InitFlags() explicitly in their init() methods
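
A minimal sketch of what the explicit initialization looks like (the program and messages are illustrative, not from this commit): klog.InitFlags(nil) registers the logging flags (-v, -logtostderr, -vmodule, ...) on flag.CommandLine, which glog used to do implicitly when imported.

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Unlike glog, klog does not register its flags as an import side
	// effect; InitFlags must be called explicitly (nil means
	// flag.CommandLine).
	klog.InitFlags(nil)
	flag.Parse()

	klog.Infof("always logged")
	klog.V(4).Infof("logged only when run with -v=4 or higher")
	klog.Flush() // klog buffers output; flush before exiting
}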

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
Author: Davanum Srinivas
Date: 2018-11-09 13:49:10 -05:00
parent 97baad34a7
commit 954996e231
1263 changed files with 10023 additions and 10076 deletions


@@ -43,7 +43,7 @@ go_library(
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@@ -74,8 +74,8 @@ go_test(
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@@ -22,7 +22,7 @@ import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -84,12 +84,12 @@ func NewCloudNodeController(
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"})
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
if kubeClient != nil {
glog.V(0).Infof("Sending events to api server.")
klog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
} else {
glog.V(0).Infof("No api server defined - no events will be sent to API server.")
klog.V(0).Infof("No api server defined - no events will be sent to API server.")
}
cnc := &CloudNodeController{
@@ -137,7 +137,7 @@ func (cnc *CloudNodeController) UpdateNodeStatus() {
nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
glog.Errorf("Error monitoring node status: %v", err)
klog.Errorf("Error monitoring node status: %v", err)
return
}
@@ -151,27 +151,27 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
// Do not process nodes that are still tainted
cloudTaint := getCloudTaint(node.Spec.Taints)
if cloudTaint != nil {
glog.V(5).Infof("This node %s is still tainted. Will not process.", node.Name)
klog.V(5).Infof("This node %s is still tainted. Will not process.", node.Name)
return
}
// Node that isn't present according to the cloud provider shouldn't have its address updated
exists, err := ensureNodeExistsByProviderID(instances, node)
if err != nil {
// Continue to update the node address when we cannot tell whether the node still exists
glog.Errorf("%v", err)
klog.Errorf("%v", err)
} else if !exists {
glog.V(4).Infof("The node %s is no longer present according to the cloud provider, do not process.", node.Name)
klog.V(4).Infof("The node %s is no longer present according to the cloud provider, do not process.", node.Name)
return
}
nodeAddresses, err := getNodeAddressesByProviderIDOrName(instances, node)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
return
}
if len(nodeAddresses) == 0 {
glog.V(5).Infof("Skipping node address update for node %q since cloud provider did not return any", node.Name)
klog.V(5).Infof("Skipping node address update for node %q since cloud provider did not return any", node.Name)
return
}
@@ -195,7 +195,7 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
// it can be found in the cloud as well (consistent with the behaviour in kubelet)
if nodeIP, ok := ensureNodeProvidedIPExists(node, nodeAddresses); ok {
if nodeIP == nil {
glog.Errorf("Specified Node IP not found in cloudprovider")
klog.Errorf("Specified Node IP not found in cloudprovider")
return
}
}
@@ -206,7 +206,7 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
}
_, _, err = nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode)
if err != nil {
glog.Errorf("Error patching node with cloud ip addresses = [%v]", err)
klog.Errorf("Error patching node with cloud ip addresses = [%v]", err)
}
}
@@ -221,7 +221,7 @@ func (cnc *CloudNodeController) MonitorNode() {
nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
glog.Errorf("Error monitoring node status: %v", err)
klog.Errorf("Error monitoring node status: %v", err)
return
}
@@ -238,13 +238,13 @@ func (cnc *CloudNodeController) MonitorNode() {
name := node.Name
node, err = cnc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
klog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
break
}
time.Sleep(retrySleepTime)
}
if currentReadyCondition == nil {
glog.Errorf("Update status of Node %v from CloudNodeController exceeds retry count or the Node was deleted.", node.Name)
klog.Errorf("Update status of Node %v from CloudNodeController exceeds retry count or the Node was deleted.", node.Name)
continue
}
// If the known node status says that Node is NotReady, then check if the node has been removed
@@ -256,14 +256,14 @@ func (cnc *CloudNodeController) MonitorNode() {
// does not delete the node from the kubernetes cluster when the instance is shut down; see issue #46442
shutdown, err := nodectrlutil.ShutdownInCloudProvider(context.TODO(), cnc.cloud, node)
if err != nil {
glog.Errorf("Error checking if node %s is shutdown: %v", node.Name, err)
klog.Errorf("Error checking if node %s is shutdown: %v", node.Name, err)
}
if shutdown && err == nil {
// if node is shutdown add shutdown taint
err = controller.AddOrUpdateTaintOnNode(cnc.kubeClient, node.Name, controller.ShutdownTaint)
if err != nil {
glog.Errorf("Error patching node taints: %v", err)
klog.Errorf("Error patching node taints: %v", err)
}
// Continue checking the remaining nodes since the current one is shutdown.
continue
@@ -273,7 +273,7 @@ func (cnc *CloudNodeController) MonitorNode() {
// doesn't, delete the node immediately.
exists, err := ensureNodeExistsByProviderID(instances, node)
if err != nil {
glog.Errorf("Error checking if node %s exists: %v", node.Name, err)
klog.Errorf("Error checking if node %s exists: %v", node.Name, err)
continue
}
@@ -282,7 +282,7 @@ func (cnc *CloudNodeController) MonitorNode() {
continue
}
glog.V(2).Infof("Deleting node since it is no longer present in cloud provider: %s", node.Name)
klog.V(2).Infof("Deleting node since it is no longer present in cloud provider: %s", node.Name)
ref := &v1.ObjectReference{
Kind: "Node",
@@ -290,14 +290,14 @@ func (cnc *CloudNodeController) MonitorNode() {
UID: types.UID(node.UID),
Namespace: "",
}
glog.V(2).Infof("Recording %s event message for node %s", "DeletingNode", node.Name)
klog.V(2).Infof("Recording %s event message for node %s", "DeletingNode", node.Name)
cnc.recorder.Eventf(ref, v1.EventTypeNormal, fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name), "Node %s event: %s", node.Name, "DeletingNode")
go func(nodeName string) {
defer utilruntime.HandleCrash()
if err := cnc.kubeClient.CoreV1().Nodes().Delete(nodeName, nil); err != nil {
glog.Errorf("unable to delete node %q: %v", nodeName, err)
klog.Errorf("unable to delete node %q: %v", nodeName, err)
}
}(node.Name)
@@ -305,7 +305,7 @@ func (cnc *CloudNodeController) MonitorNode() {
// if taint exist remove taint
err = controller.RemoveTaintOffNode(cnc.kubeClient, node.Name, node, controller.ShutdownTaint)
if err != nil {
glog.Errorf("Error patching node taints: %v", err)
klog.Errorf("Error patching node taints: %v", err)
}
}
}
@@ -326,7 +326,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
cloudTaint := getCloudTaint(node.Spec.Taints)
if cloudTaint == nil {
glog.V(2).Infof("This node %s is registered without the cloud taint. Will not process.", node.Name)
klog.V(2).Infof("This node %s is registered without the cloud taint. Will not process.", node.Name)
return
}
@@ -365,7 +365,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
// we should attempt to set providerID on curNode, but
// we can continue if we fail since we will attempt to set
// node addresses given the node name in getNodeAddressesByProviderIDOrName
glog.Errorf("failed to set node provider id: %v", err)
klog.Errorf("failed to set node provider id: %v", err)
}
}
@@ -385,7 +385,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
if instanceType, err := getInstanceTypeByProviderIDOrName(instances, curNode); err != nil {
return err
} else if instanceType != "" {
glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
curNode.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType
}
@@ -395,11 +395,11 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
return fmt.Errorf("failed to get zone from cloud provider: %v", err)
}
if zone.FailureDomain != "" {
glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain)
klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain)
curNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain
}
if zone.Region != "" {
glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region)
klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region)
curNode.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region
}
}
@@ -420,7 +420,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
return
}
glog.Infof("Successfully initialized node %s with cloud provider", node.Name)
klog.Infof("Successfully initialized node %s with cloud provider", node.Name)
}
func getCloudTaint(taints []v1.Taint) *v1.Taint {
@@ -458,7 +458,7 @@ func ensureNodeExistsByProviderID(instances cloudprovider.Instances, node *v1.No
}
if providerID == "" {
glog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name)
klog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name)
return false, nil
}
}
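
Every hunk above is the same mechanical swap; the one call site worth a second look is the event broadcaster, because StartLogging accepts any Printf-style function value, which is exactly why glog.Infof can be replaced by klog.Infof with no other change. A hedged sketch of that wiring (fake client, component name, and event are illustrative, not from this commit):

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

func main() {
	kubeClient := fake.NewSimpleClientset()

	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme,
		v1.EventSource{Component: "example-controller"})

	// StartLogging accepts any func(format string, args ...interface{}),
	// so glog.Infof -> klog.Infof is the entire change at this call site.
	eventBroadcaster.StartLogging(klog.Infof)
	eventBroadcaster.StartRecordingToSink(
		&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})

	recorder.Eventf(&v1.ObjectReference{Kind: "Node", Name: "example"},
		v1.EventTypeNormal, "Example", "event recorded via the broadcaster")
}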


@@ -37,8 +37,8 @@ import (
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"github.com/golang/glog"
"github.com/stretchr/testify/assert"
"k8s.io/klog"
)
func TestEnsureNodeExistsByProviderID(t *testing.T) {
@@ -250,7 +250,7 @@ func TestNodeShutdown(t *testing.T) {
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
nodeStatusUpdateFrequency: 1 * time.Second,
}
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.Run(wait.NeverStop)
@@ -349,7 +349,7 @@ func TestNodeDeleted(t *testing.T) {
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
nodeStatusUpdateFrequency: 1 * time.Second,
}
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.Run(wait.NeverStop)
@@ -429,7 +429,7 @@ func TestNodeInitialized(t *testing.T) {
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
nodeStatusUpdateFrequency: 1 * time.Second,
}
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.AddCloudNode(fnh.Existing[0])
@@ -494,7 +494,7 @@ func TestNodeIgnored(t *testing.T) {
nodeMonitorPeriod: 5 * time.Second,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
}
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.AddCloudNode(fnh.Existing[0])
assert.Equal(t, 0, len(fnh.UpdatedNodes), "Node was wrongly updated")
@@ -568,7 +568,7 @@ func TestGCECondition(t *testing.T) {
nodeMonitorPeriod: 1 * time.Second,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
}
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.AddCloudNode(fnh.Existing[0])
@@ -658,7 +658,7 @@ func TestZoneInitialized(t *testing.T) {
nodeMonitorPeriod: 5 * time.Second,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
}
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.AddCloudNode(fnh.Existing[0])
@@ -749,7 +749,7 @@ func TestNodeAddresses(t *testing.T) {
nodeStatusUpdateFrequency: 1 * time.Second,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
}
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.AddCloudNode(fnh.Existing[0])
@@ -864,7 +864,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
nodeStatusUpdateFrequency: 1 * time.Second,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
}
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.AddCloudNode(fnh.Existing[0])
@@ -1156,7 +1156,7 @@ func TestNodeProviderID(t *testing.T) {
nodeStatusUpdateFrequency: 1 * time.Second,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
}
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.AddCloudNode(fnh.Existing[0])
@@ -1240,7 +1240,7 @@ func TestNodeProviderIDAlreadySet(t *testing.T) {
nodeStatusUpdateFrequency: 1 * time.Second,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
}
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.AddCloudNode(fnh.Existing[0])
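
These test hunks pair with the commit message's note about init() methods: once the flags are no longer registered as an import side effect, a test binary that tunes verbosity must register them itself. A rough sketch (package name and verbosity value are invented for illustration):

package example_test

import (
	"flag"
	"testing"

	"k8s.io/klog"
)

func init() {
	// With glog the import alone registered -v and friends; with klog the
	// test binary must do it before the testing package parses flags.
	klog.InitFlags(nil)
}

func TestVerboseLogging(t *testing.T) {
	if err := flag.Set("v", "5"); err != nil { // illustrative verbosity bump
		t.Fatal(err)
	}
	klog.V(5).Infof("visible because v=5")
}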


@@ -22,7 +22,7 @@ import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
@@ -109,8 +109,8 @@ func (pvlc *PersistentVolumeLabelController) Run(threadiness int, stopCh <-chan
defer utilruntime.HandleCrash()
defer pvlc.queue.ShutDown()
glog.Infof("Starting PersistentVolumeLabelController")
defer glog.Infof("Shutting down PersistentVolumeLabelController")
klog.Infof("Starting PersistentVolumeLabelController")
defer klog.Infof("Shutting down PersistentVolumeLabelController")
go pvlc.pvlController.Run(stopCh)
@@ -197,7 +197,7 @@ func (pvlc *PersistentVolumeLabelController) addLabelsAndAffinityToVolume(vol *v
}
volumeLabels = labels
} else {
glog.V(4).Info("cloud provider does not support PVLabeler")
klog.V(4).Info("cloud provider does not support PVLabeler")
}
return pvlc.updateVolume(vol, volumeLabels)
}
@@ -244,7 +244,7 @@ func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolum
}
// Populate NodeAffinity with requirements if there are no conflicting keys found
if v1helper.NodeSelectorRequirementKeysExistInNodeSelectorTerms(requirements, newVolume.Spec.NodeAffinity.Required.NodeSelectorTerms) {
glog.V(4).Infof("NodeSelectorRequirements for cloud labels %v conflict with existing NodeAffinity %v. Skipping addition of NodeSelectorRequirements for cloud labels.",
klog.V(4).Infof("NodeSelectorRequirements for cloud labels %v conflict with existing NodeAffinity %v. Skipping addition of NodeSelectorRequirements for cloud labels.",
requirements, newVolume.Spec.NodeAffinity)
} else {
for _, req := range requirements {
@@ -255,7 +255,7 @@ func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolum
}
}
newVolume.Initializers = removeInitializer(newVolume.Initializers, initializerName)
glog.V(4).Infof("removed initializer on PersistentVolume %s", newVolume.Name)
klog.V(4).Infof("removed initializer on PersistentVolume %s", newVolume.Name)
oldData, err := json.Marshal(vol)
if err != nil {
@@ -276,7 +276,7 @@ func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolum
func (pvlc *PersistentVolumeLabelController) updateVolume(vol *v1.PersistentVolume, volLabels map[string]string) error {
volName := vol.Name
glog.V(4).Infof("updating PersistentVolume %s", volName)
klog.V(4).Infof("updating PersistentVolume %s", volName)
patchBytes, err := pvlc.createPatch(vol, volLabels)
if err != nil {
return err
@@ -286,7 +286,7 @@ func (pvlc *PersistentVolumeLabelController) updateVolume(vol *v1.PersistentVolu
if err != nil {
return fmt.Errorf("failed to update PersistentVolume %s: %v", volName, err)
}
glog.V(4).Infof("updated PersistentVolume %s", volName)
klog.V(4).Infof("updated PersistentVolume %s", volName)
return nil
}
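
The Run() hunk above shows the paired start/shutdown log lines (the second deferred) that recur across these controllers; a minimal self-contained sketch of that pattern after the migration, with an invented controller type:

package main

import (
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
)

type exampleController struct {
	queue workqueue.Interface
}

func (c *exampleController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer c.queue.ShutDown()

	klog.Infof("Starting exampleController")
	// Deferred so the shutdown line is emitted however Run returns.
	defer klog.Infof("Shutting down exampleController")

	<-stopCh
}

func main() {
	stop := make(chan struct{})
	done := make(chan struct{})
	c := &exampleController{queue: workqueue.New()}
	go func() { c.Run(stop); close(done) }()
	close(stop)
	<-done
	klog.Flush()
}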