Merge pull request #59232 from liubin/fix4

Automatic merge from submit-queue (batch tested with PRs 59607, 59232). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Make log content more informative

And change some `glog.Errorf` calls to `glog.Error` where no format arguments are used.
Authored by Kubernetes Submit Queue on 2018-02-09 08:54:34 -08:00, committed by GitHub.
2 changed files with 12 additions and 14 deletions
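
As a quick illustration of the logging convention the commit message describes, here is a minimal sketch (not code from the plugin): `glog.Error` for fixed messages with no format verbs, `glog.Errorf` only when values such as the volume name are interpolated into the message.

```go
package main

import (
	"fmt"

	"github.com/golang/glog"
)

// deleteVolume is an illustrative stand-in for the plugin's delete path.
// It shows the convention the diff moves toward: glog.Error for fixed
// strings, glog.Errorf when interpolating identifiers such as the volume name.
func deleteVolume(volumeName string, del func(string) error) error {
	if err := del(volumeName); err != nil {
		// Name the volume so the failure can be traced in the logs.
		glog.Errorf("error when deleting the volume[%s]: %v", volumeName, err)
		return err
	}
	glog.V(2).Infof("volume %s deleted successfully", volumeName)
	return nil
}

func main() {
	defer glog.Flush()
	_ = deleteVolume("vol_example", func(string) error {
		return fmt.Errorf("transport endpoint is not connected")
	})
}
```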

pkg/volume/glusterfs/glusterfs.go

@@ -490,7 +490,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
 	}
 	pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 	if err != nil {
-		glog.Errorf("failed to get existing persistent volumes")
+		glog.Error("failed to get existing persistent volumes")
 		return err
 	}
@@ -510,7 +510,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
 		gid, err := convertGid(gidStr)
 		if err != nil {
-			glog.Error(err)
+			glog.Errorf("failed to parse gid[%s]: %v", gidStr, err)
 			continue
 		}
@@ -638,12 +638,12 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 	}
 	err = cli.VolumeDelete(volumeID)
 	if err != nil {
-		glog.Errorf("error when deleting the volume :%v", err)
+		glog.Errorf("error when deleting the volume[%s]: %v", volumeName, err)
 		return err
 	}
 	glog.V(2).Infof("volume %s deleted successfully", volumeName)
-	//Deleter takes endpoint and endpointnamespace from pv spec.
+	//Deleter takes endpoint and namespace from pv spec.
 	pvSpec := d.spec.Spec
 	var dynamicEndpoint, dynamicNamespace string
 	if pvSpec.ClaimRef == nil {
@@ -891,7 +891,6 @@ func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (
 }
 // getClusterNodes() returns the cluster nodes of a given cluster
 func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) {
 	clusterinfo, err := cli.ClusterInfo(cluster)
 	if err != nil {
@@ -905,7 +904,7 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string,
 	for _, node := range clusterinfo.Nodes {
 		nodei, err := cli.NodeInfo(string(node))
 		if err != nil {
-			glog.Errorf(" failed to get hostip: %v", err)
+			glog.Errorf("failed to get hostip: %v", err)
 			return nil, fmt.Errorf("failed to get hostip: %v", err)
 		}
 		ipaddr := dstrings.Join(nodei.NodeAddRequest.Hostnames.Storage, "")
@@ -952,7 +951,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
case "gidmin": case "gidmin":
parseGidMin, err := convertGid(v) parseGidMin, err := convertGid(v)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid value %q for volume plugin %s", k, glusterfsPluginName) return nil, fmt.Errorf("invalid gidMin value %q for volume plugin %s", k, glusterfsPluginName)
} }
if parseGidMin < absoluteGidMin { if parseGidMin < absoluteGidMin {
return nil, fmt.Errorf("gidMin must be >= %v", absoluteGidMin) return nil, fmt.Errorf("gidMin must be >= %v", absoluteGidMin)
@@ -964,7 +963,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
case "gidmax": case "gidmax":
parseGidMax, err := convertGid(v) parseGidMax, err := convertGid(v)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid value %q for volume plugin %s", k, glusterfsPluginName) return nil, fmt.Errorf("invalid gidMax value %q for volume plugin %s", k, glusterfsPluginName)
} }
if parseGidMax < absoluteGidMin { if parseGidMax < absoluteGidMin {
return nil, fmt.Errorf("gidMax must be >= %v", absoluteGidMin) return nil, fmt.Errorf("gidMax must be >= %v", absoluteGidMin)
@@ -1095,12 +1094,12 @@ func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) {
 func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
 	pvSpec := spec.PersistentVolume.Spec
-	glog.V(2).Infof("Request to expand volume: %s ", pvSpec.Glusterfs.Path)
 	volumeName := pvSpec.Glusterfs.Path
+	glog.V(2).Infof("Request to expand volume: %s ", volumeName)
 	volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)
 	if err != nil {
-		return oldSize, fmt.Errorf("failed to get volumeID, err: %v", err)
+		return oldSize, fmt.Errorf("failed to get volumeID for volume %s, err: %v", volumeName, err)
 	}
 	//Get details of SC.
@@ -1127,13 +1126,12 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
 	expansionSizeGiB := int(volume.RoundUpSize(expansionSize, volume.GIB))
 	// Find out requested Size
 	requestGiB := volume.RoundUpToGiB(newSize)
 	//Check the existing volume size
 	currentVolumeInfo, err := cli.VolumeInfo(volumeID)
 	if err != nil {
-		glog.Errorf("error when fetching details of volume :%v", err)
+		glog.Errorf("error when fetching details of volume[%s]: %v", volumeName, err)
 		return oldSize, err
 	}
@@ -1147,7 +1145,7 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
 	// Expand the volume
 	volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq)
 	if err != nil {
-		glog.Errorf("error when expanding the volume :%v", err)
+		glog.Errorf("error when expanding the volume[%s]: %v", volumeName, err)
 		return oldSize, err
 	}
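
Earlier in ExpandVolumeDevice the sizes are rounded up to whole GiB before the resize call is sent. A self-contained sketch of that ceiling-division arithmetic follows; roundUpSize here illustrates what a RoundUpSize/RoundUpToGiB-style helper computes and is not the Kubernetes volume package's implementation.

```go
package main

import "fmt"

const giB = int64(1024 * 1024 * 1024)

// roundUpSize returns how many whole allocation units are needed to hold
// volumeSizeBytes, i.e. a ceiling division.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	// A request for 100Gi plus 512Mi must become 101 GiB before it is
	// passed to the Gluster REST API, which works in whole GiB.
	requested := 100*giB + 512*1024*1024
	fmt.Printf("requestGiB = %d\n", roundUpSize(requested, giB)) // 101
}
```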

pkg/volume/glusterfs/glusterfs_util.go

@@ -27,7 +27,7 @@ import (
 // readGlusterLog will take the last 2 lines of the log file
 // on failure of gluster SetUp and return those so kubelet can
 // properly expose them
-// return nil on any failure
+// return error on any failure
 func readGlusterLog(path string, podName string) error {
 	var line1 string
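
The corrected comment requires readGlusterLog to return an error on any failure so the kubelet can surface the last lines of the Gluster mount log. Below is a minimal, self-contained sketch of that behavior; the function name, helper logic, and log path are illustrative and not the plugin's implementation.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
)

// lastLinesAsError reads a log file and returns its last n lines wrapped in
// an error, so a caller such as the kubelet can expose them in events.
// It never returns nil: any failure is reported as an error.
func lastLinesAsError(path, podName string, n int) error {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("failed to open log file %s for pod %s: %v", path, podName, err)
	}
	defer f.Close()

	var tail []string
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		tail = append(tail, scanner.Text())
		if len(tail) > n {
			tail = tail[1:] // keep only the last n lines
		}
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("failed to read log file %s for pod %s: %v", path, podName, err)
	}
	return fmt.Errorf("mount failed for pod %s, last log lines: %v", podName, tail)
}

func main() {
	// Illustrative path; the real log location depends on the kubelet setup.
	fmt.Println(lastLinesAsError("/var/log/glusterfs/example-mount.log", "mypod", 2))
}
```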