Reformat log to show more details

bin liu 2018-02-02 10:12:10 +08:00
parent 02f14bc694
commit e44779d86f
2 changed files with 12 additions and 14 deletions
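The change applies one pattern throughout the diff below: log calls with no format verbs switch from glog.Errorf to glog.Error, and formatted error messages name the object they refer to (the gid string, the volume name). The following is a minimal standalone sketch of that convention, not code from the commit; the deleteVolume helper and the vol_0123 name are hypothetical and exist only so the example compiles and runs.

package main

import (
	"errors"
	"flag"

	"github.com/golang/glog"
)

// deleteVolume is a hypothetical stand-in for the storage backend call made
// by the real plugin; it exists only so this sketch is self-contained.
func deleteVolume(volumeName string) error {
	if volumeName == "vol_missing" {
		return errors.New("volume not found")
	}
	return nil
}

// demoDelete shows the logging convention applied in this commit: fixed
// strings go through glog.Error, and formatted error messages include the
// affected volume name so the failing object is visible in the log.
func demoDelete(volumeName string) error {
	if volumeName == "" {
		// Fixed message with no format verbs: glog.Error, not glog.Errorf.
		glog.Error("failed to get existing persistent volumes")
		return errors.New("empty volume name")
	}
	if err := deleteVolume(volumeName); err != nil {
		// Formatted message that names the volume being deleted.
		glog.Errorf("error when deleting the volume(%s): %v", volumeName, err)
		return err
	}
	glog.V(2).Infof("volume %s deleted successfully", volumeName)
	return nil
}

func main() {
	flag.Parse()
	defer glog.Flush()
	_ = demoDelete("vol_0123")
}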


@@ -496,7 +496,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
 	}
 	pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 	if err != nil {
-		glog.Errorf("failed to get existing persistent volumes")
+		glog.Error("failed to get existing persistent volumes")
 		return err
 	}
@@ -516,7 +516,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
 		gid, err := convertGid(gidStr)
 		if err != nil {
-			glog.Error(err)
+			glog.Errorf("failed to parse gid[%s]: %v", gidStr, err)
 			continue
 		}
@@ -644,12 +644,12 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 	}
 	err = cli.VolumeDelete(volumeID)
 	if err != nil {
-		glog.Errorf("error when deleting the volume :%v", err)
+		glog.Errorf("error when deleting the volume(%s): %v", volumeName, err)
 		return err
 	}
 	glog.V(2).Infof("volume %s deleted successfully", volumeName)
-	//Deleter takes endpoint and endpointnamespace from pv spec.
+	//Deleter takes endpoint and namespace from pv spec.
 	pvSpec := d.spec.Spec
 	var dynamicEndpoint, dynamicNamespace string
 	if pvSpec.ClaimRef == nil {
@@ -897,7 +897,6 @@ func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (
 }
-// getClusterNodes() returns the cluster nodes of a given cluster
 func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) {
 	clusterinfo, err := cli.ClusterInfo(cluster)
 	if err != nil {
@@ -911,7 +910,7 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string,
 	for _, node := range clusterinfo.Nodes {
 		nodei, err := cli.NodeInfo(string(node))
 		if err != nil {
-			glog.Errorf(" failed to get hostip: %v", err)
+			glog.Errorf("failed to get hostip: %v", err)
 			return nil, fmt.Errorf("failed to get hostip: %v", err)
 		}
 		ipaddr := dstrings.Join(nodei.NodeAddRequest.Hostnames.Storage, "")
@@ -958,7 +957,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 		case "gidmin":
 			parseGidMin, err := convertGid(v)
 			if err != nil {
-				return nil, fmt.Errorf("invalid value %q for volume plugin %s", k, glusterfsPluginName)
+				return nil, fmt.Errorf("invalid gidMin value %q for volume plugin %s", k, glusterfsPluginName)
 			}
 			if parseGidMin < absoluteGidMin {
 				return nil, fmt.Errorf("gidMin must be >= %v", absoluteGidMin)
@@ -970,7 +969,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 		case "gidmax":
 			parseGidMax, err := convertGid(v)
 			if err != nil {
-				return nil, fmt.Errorf("invalid value %q for volume plugin %s", k, glusterfsPluginName)
+				return nil, fmt.Errorf("invalid gidMax value %q for volume plugin %s", k, glusterfsPluginName)
 			}
 			if parseGidMax < absoluteGidMin {
 				return nil, fmt.Errorf("gidMax must be >= %v", absoluteGidMin)
@@ -1101,12 +1100,12 @@ func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) {
 func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
 	pvSpec := spec.PersistentVolume.Spec
-	glog.V(2).Infof("Request to expand volume: %s ", pvSpec.Glusterfs.Path)
 	volumeName := pvSpec.Glusterfs.Path
+	glog.V(2).Infof("Request to expand volume: %s ", volumeName)
 	volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)
 	if err != nil {
-		return oldSize, fmt.Errorf("failed to get volumeID, err: %v", err)
+		return oldSize, fmt.Errorf("failed to get volumeID for volume %s, err: %v", volumeName, err)
 	}
 	//Get details of SC.
@@ -1133,13 +1132,12 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
 	expansionSizeGiB := int(volume.RoundUpSize(expansionSize, volume.GIB))
 	// Find out requested Size
 	requestGiB := volume.RoundUpToGiB(newSize)
 	//Check the existing volume size
 	currentVolumeInfo, err := cli.VolumeInfo(volumeID)
 	if err != nil {
-		glog.Errorf("error when fetching details of volume :%v", err)
+		glog.Errorf("error when fetching details of volume(%s): %v", volumeName, err)
 		return oldSize, err
 	}
@@ -1153,7 +1151,7 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
 	// Expand the volume
 	volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq)
 	if err != nil {
-		glog.Errorf("error when expanding the volume :%v", err)
+		glog.Errorf("error when expanding the volume(%s): %v", volumeName, err)
 		return oldSize, err
 	}


@@ -27,7 +27,7 @@ import (
 // readGlusterLog will take the last 2 lines of the log file
 // on failure of gluster SetUp and return those so kubelet can
 // properly expose them
-// return nil on any failure
+// return error on any failure
 func readGlusterLog(path string, podName string) error {
 	var line1 string
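
For context on the comment corrected above: readGlusterLog is described as taking the last two lines of the gluster log file after a failed SetUp and returning them so kubelet can expose them. Below is a rough, hypothetical sketch of that behavior under those assumptions; it is not the plugin's actual readGlusterLog implementation, and the log path and pod name in main are made up for illustration.

package main

import (
	"bufio"
	"fmt"
	"os"
)

// readLastTwoLogLines is a hypothetical sketch of the behavior the comment
// above describes: open the gluster log, keep the last two lines seen, and
// return them wrapped in an error so the caller can surface them to the user.
func readLastTwoLogLines(path string, podName string) error {
	file, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("could not open log file %s for pod %s: %v", path, podName, err)
	}
	defer file.Close()

	var line1, line2 string
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		// Slide a two-line window over the file; after the loop these hold
		// the last two lines read.
		line1, line2 = line2, scanner.Text()
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error reading log file %s: %v", path, err)
	}
	return fmt.Errorf("mount failed for pod %s, last lines of %s:\n%s\n%s", podName, path, line1, line2)
}

func main() {
	// Hypothetical path and pod name, for illustration only.
	if err := readLastTwoLogLines("/var/log/glusterfs/glusterfs.log", "mypod"); err != nil {
		fmt.Println(err)
	}
}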