mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-24 20:24:09 +00:00
Detach getClusterNodes() from the provisioner: turn the method into a standalone package-level function.
Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
This commit is contained in:
parent
a41e1c52ef
commit
5f91b02a61
@ -736,33 +736,6 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
return pv, nil
|
||||
}
|
||||
|
||||
func (p *glusterfsVolumeProvisioner) GetClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) {
|
||||
clusterinfo, err := cli.ClusterInfo(cluster)
|
||||
if err != nil {
|
||||
glog.Errorf("glusterfs: failed to get cluster details: %v", err)
|
||||
return nil, fmt.Errorf("failed to get cluster details: %v", err)
|
||||
}
|
||||
|
||||
// For the dynamically provisioned volume, we gather the list of node IPs
|
||||
// of the cluster on which provisioned volume belongs to, as there can be multiple
|
||||
// clusters.
|
||||
for _, node := range clusterinfo.Nodes {
|
||||
nodei, err := cli.NodeInfo(string(node))
|
||||
if err != nil {
|
||||
glog.Errorf("glusterfs: failed to get hostip: %v", err)
|
||||
return nil, fmt.Errorf("failed to get hostip: %v", err)
|
||||
}
|
||||
ipaddr := dstrings.Join(nodei.NodeAddRequest.Hostnames.Storage, "")
|
||||
dynamicHostIps = append(dynamicHostIps, ipaddr)
|
||||
}
|
||||
glog.V(3).Infof("glusterfs: hostlist :%v", dynamicHostIps)
|
||||
if len(dynamicHostIps) == 0 {
|
||||
glog.Errorf("glusterfs: no hosts found: %v", err)
|
||||
return nil, fmt.Errorf("no hosts found: %v", err)
|
||||
}
|
||||
return dynamicHostIps, nil
|
||||
}
|
||||
|
||||
func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, err error) {
|
||||
var clusterIDs []string
|
||||
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
|
||||
@ -790,7 +763,7 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum
|
||||
return nil, 0, fmt.Errorf("error creating volume %v", err)
|
||||
}
|
||||
glog.V(1).Infof("glusterfs: volume with size: %d and name: %s created", volume.Size, volume.Name)
|
||||
dynamicHostIps, err := p.GetClusterNodes(cli, volume.Cluster)
|
||||
dynamicHostIps, err := getClusterNodes(cli, volume.Cluster)
|
||||
if err != nil {
|
||||
glog.Errorf("glusterfs: error [%v] when getting cluster nodes for volume %s", err, volume)
|
||||
return nil, 0, fmt.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume)
|
||||
@ -909,6 +882,35 @@ func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (
|
||||
return secret, nil
|
||||
}
|
||||
|
||||
// getClusterNodes() returns the cluster nodes of a given cluster
|
||||
|
||||
func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) {
|
||||
clusterinfo, err := cli.ClusterInfo(cluster)
|
||||
if err != nil {
|
||||
glog.Errorf("glusterfs: failed to get cluster details: %v", err)
|
||||
return nil, fmt.Errorf("failed to get cluster details: %v", err)
|
||||
}
|
||||
|
||||
// For the dynamically provisioned volume, we gather the list of node IPs
|
||||
// of the cluster on which provisioned volume belongs to, as there can be multiple
|
||||
// clusters.
|
||||
for _, node := range clusterinfo.Nodes {
|
||||
nodei, err := cli.NodeInfo(string(node))
|
||||
if err != nil {
|
||||
glog.Errorf("glusterfs: failed to get hostip: %v", err)
|
||||
return nil, fmt.Errorf("failed to get hostip: %v", err)
|
||||
}
|
||||
ipaddr := dstrings.Join(nodei.NodeAddRequest.Hostnames.Storage, "")
|
||||
dynamicHostIps = append(dynamicHostIps, ipaddr)
|
||||
}
|
||||
glog.V(3).Infof("glusterfs: hostlist :%v", dynamicHostIps)
|
||||
if len(dynamicHostIps) == 0 {
|
||||
glog.Errorf("glusterfs: no hosts found: %v", err)
|
||||
return nil, fmt.Errorf("no hosts found: %v", err)
|
||||
}
|
||||
return dynamicHostIps, nil
|
||||
}
|
||||
|
||||
// parseClassParameters parses StorageClass.Parameters
|
||||
func parseClassParameters(params map[string]string, kubeClient clientset.Interface) (*provisionerConfig, error) {
|
||||
var cfg provisionerConfig
|
||||
|
Loading…
Reference in New Issue
Block a user