Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 10:51:29 +00:00)
Cleanup: Audit log and error capitalization
parent 6a2d0f67d1
commit 346fdbccf0
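The sweep below applies the Go convention (the one golint flags) that error strings start with a lower-case letter and avoid trailing punctuation, because callers routinely wrap them into longer messages. A minimal sketch of why that matters; the loadConfig helper, message text, and path are illustrative only and are not code from this commit:

package main

import (
	"errors"
	"fmt"
)

// loadConfig is a hypothetical helper (not from this commit) showing why Go
// error strings start lower-case: callers usually wrap or prefix them, so a
// capitalized message would read oddly in the middle of the combined text.
func loadConfig(path string) error {
	// Simulate an underlying failure and wrap it with context.
	return fmt.Errorf("error loading config file %q: %w", path, errors.New("no such file or directory"))
}

func main() {
	if err := loadConfig("/tmp/kubeconfig"); err != nil {
		// Output: starting kubelet: error loading config file "/tmp/kubeconfig": no such file or directory
		fmt.Println("starting kubelet:", err)
	}
}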
@@ -1338,7 +1338,7 @@ func (kl *Kubelet) initializeModules() error {
// Start out of memory watcher.
if err := kl.oomWatcher.Start(kl.nodeRef); err != nil {
- return fmt.Errorf("Failed to start OOM watcher %v", err)
+ return fmt.Errorf("failed to start OOM watcher %v", err)
}

// Start resource analyzer

@@ -294,7 +294,7 @@ func (c *CRDFinalizer) processNextWorkItem() bool {
func (c *CRDFinalizer) enqueue(obj *apiextensions.CustomResourceDefinition) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
- utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", obj, err))
+ utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", obj, err))
return
}

@@ -230,7 +230,7 @@ func (c *ConditionController) processNextWorkItem() bool {
func (c *ConditionController) enqueue(obj *apiextensions.CustomResourceDefinition) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
- utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", obj, err))
+ utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", obj, err))
return
}

@@ -326,7 +326,7 @@ func (c *NamingConditionController) processNextWorkItem() bool {
func (c *NamingConditionController) enqueue(obj *apiextensions.CustomResourceDefinition) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
- utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", obj, err))
+ utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", obj, err))
return
}

@@ -128,12 +128,12 @@ func ensureTags(gvk schema.GroupVersionKind, tp reflect.Type, parents []reflect.
f := tp.Field(i)
jsonTag := f.Tag.Get("json")
if len(jsonTag) == 0 {
- errs = append(errs, fmt.Errorf("External types should have json tags. %#v tags on field %v are: %s.\n%s", gvk, f.Name, f.Tag, fmtParentString(parents)))
+ errs = append(errs, fmt.Errorf("external types should have json tags. %#v tags on field %v are: %s.\n%s", gvk, f.Name, f.Tag, fmtParentString(parents)))
}

jsonTagName := strings.Split(jsonTag, ",")[0]
if len(jsonTagName) > 0 && (jsonTagName[0] < 'a' || jsonTagName[0] > 'z') && jsonTagName != "-" && allowedNonstandardJSONNames[tp] != jsonTagName {
- errs = append(errs, fmt.Errorf("External types should have json names starting with lowercase letter. %#v has json tag on field %v with name %s.\n%s", gvk, f.Name, jsonTagName, fmtParentString(parents)))
+ errs = append(errs, fmt.Errorf("external types should have json names starting with lowercase letter. %#v has json tag on field %v with name %s.\n%s", gvk, f.Name, jsonTagName, fmtParentString(parents)))
}

errs = append(errs, ensureTags(gvk, f.Type, parents, allowedNonstandardJSONNames)...)

@@ -94,7 +94,7 @@ func parseBool(key string) bool {
}
value, err := strconv.ParseBool(key)
if err != nil {
- utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key))
+ utilruntime.HandleError(fmt.Errorf("couldn't parse '%s' as bool for unstructured mismatch detection", key))
}
return value
}

@@ -135,18 +135,18 @@ func main() {
// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
result, getErr := client.Resource(deploymentRes).Namespace(namespace).Get("demo-deployment", metav1.GetOptions{})
if getErr != nil {
- panic(fmt.Errorf("Failed to get latest version of Deployment: %v", getErr))
+ panic(fmt.Errorf("failed to get latest version of Deployment: %v", getErr))
}

// update replicas to 1
if err := unstructured.SetNestedField(result.Object, int64(1), "spec", "replicas"); err != nil {
- panic(fmt.Errorf("Failed to set replica value: %v", err))
+ panic(fmt.Errorf("failed to set replica value: %v", err))
}

// extract spec containers
containers, found, err := unstructured.NestedSlice(result.Object, "spec", "template", "spec", "containers")
if err != nil || !found || containers == nil {
- panic(fmt.Errorf("Deployment containers not found or error in spec: %v", err))
+ panic(fmt.Errorf("deployment containers not found or error in spec: %v", err))
}

// update container[0] image

@@ -161,7 +161,7 @@ func main() {
return updateErr
})
if retryErr != nil {
- panic(fmt.Errorf("Update failed: %v", retryErr))
+ panic(fmt.Errorf("update failed: %v", retryErr))
}
fmt.Println("Updated deployment...")

@@ -55,7 +55,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error {
pluginsLock.Lock()
defer pluginsLock.Unlock()
if _, found := plugins[name]; found {
- return fmt.Errorf("Auth Provider Plugin %q was registered twice", name)
+ return fmt.Errorf("auth Provider Plugin %q was registered twice", name)
}
klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
plugins[name] = plugin

@@ -67,7 +67,7 @@ func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig
defer pluginsLock.Unlock()
p, ok := plugins[apc.Name]
if !ok {
- return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name)
+ return nil, fmt.Errorf("no Auth Provider found for name %q", apc.Name)
}
return p(clusterAddress, apc.Config, persister)
}

@@ -868,13 +868,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
// 3. Apiserver closes connection.
// 4. client-go should catch this and return an error.
klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
- streamErr := fmt.Errorf("Stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err)
+ streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err)
return Result{
err: streamErr,
}
default:
klog.Errorf("Unexpected error when reading response body: %v", err)
- unexpectedErr := fmt.Errorf("Unexpected error when reading response body. Please retry. Original error: %v", err)
+ unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %v", err)
return Result{
err: unexpectedErr,
}

@@ -200,7 +200,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
continue
}
if err != nil {
- errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err))
+ errlist = append(errlist, fmt.Errorf("error loading config file \"%s\": %v", filename, err))
continue
}

@@ -467,7 +467,7 @@ func ResolveLocalPaths(config *clientcmdapi.Config) error {
}
base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
if err != nil {
- return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
+ return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
}

if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil {

@@ -480,7 +480,7 @@ func ResolveLocalPaths(config *clientcmdapi.Config) error {
}
base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
if err != nil {
- return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
+ return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
}

if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil {

@@ -90,20 +90,20 @@ func parsePorts(ports []string) ([]ForwardedPort, error) {
}
remoteString = parts[1]
} else {
- return nil, fmt.Errorf("Invalid port format '%s'", portString)
+ return nil, fmt.Errorf("invalid port format '%s'", portString)
}

localPort, err := strconv.ParseUint(localString, 10, 16)
if err != nil {
- return nil, fmt.Errorf("Error parsing local port '%s': %s", localString, err)
+ return nil, fmt.Errorf("error parsing local port '%s': %s", localString, err)
}

remotePort, err := strconv.ParseUint(remoteString, 10, 16)
if err != nil {
- return nil, fmt.Errorf("Error parsing remote port '%s': %s", remoteString, err)
+ return nil, fmt.Errorf("error parsing remote port '%s': %s", remoteString, err)
}
if remotePort == 0 {
- return nil, fmt.Errorf("Remote port must be > 0")
+ return nil, fmt.Errorf("remote port must be > 0")
}

forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)})

@@ -159,14 +159,14 @@ func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, rea
// NewOnAddresses creates a new PortForwarder with custom listen addresses.
func NewOnAddresses(dialer httpstream.Dialer, addresses []string, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
if len(addresses) == 0 {
- return nil, errors.New("You must specify at least 1 address")
+ return nil, errors.New("you must specify at least 1 address")
}
parsedAddresses, err := parseAddresses(addresses)
if err != nil {
return nil, err
}
if len(ports) == 0 {
- return nil, errors.New("You must specify at least 1 port")
+ return nil, errors.New("you must specify at least 1 port")
}
parsedPorts, err := parsePorts(ports)
if err != nil {

@@ -219,7 +219,7 @@ func (pf *PortForwarder) forward() error {
}

if !listenSuccess {
- return fmt.Errorf("Unable to listen on any of the requested ports: %v", pf.ports)
+ return fmt.Errorf("unable to listen on any of the requested ports: %v", pf.ports)
}

if pf.Ready != nil {

@@ -277,7 +277,7 @@ func (pf *PortForwarder) listenOnPortAndAddress(port *ForwardedPort, protocol st
func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) {
listener, err := net.Listen(protocol, net.JoinHostPort(hostname, strconv.Itoa(int(port.Local))))
if err != nil {
- return nil, fmt.Errorf("Unable to create listener: Error %s", err)
+ return nil, fmt.Errorf("unable to create listener: Error %s", err)
}
listenerAddress := listener.Addr().String()
host, localPort, _ := net.SplitHostPort(listenerAddress)

@@ -285,7 +285,7 @@ func (pf *PortForwarder) getListener(protocol string, hostname string, port *For

if err != nil {
fmt.Fprintf(pf.out, "Failed to forward from %s:%d -> %d\n", hostname, localPortUInt, port.Remote)
- return nil, fmt.Errorf("Error parsing local port: %s from %s (%s)", err, listenerAddress, host)
+ return nil, fmt.Errorf("error parsing local port: %s from %s (%s)", err, listenerAddress, host)
}
port.Local = uint16(localPortUInt)
if pf.out != nil {

@@ -303,7 +303,7 @@ func (pf *PortForwarder) waitForConnection(listener net.Listener, port Forwarded
if err != nil {
// TODO consider using something like https://github.com/hydrogen18/stoppableListener?
if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
- runtime.HandleError(fmt.Errorf("Error accepting connection on port %d: %v", port.Local, err))
+ runtime.HandleError(fmt.Errorf("error accepting connection on port %d: %v", port.Local, err))
}
return
}

@@ -411,7 +411,7 @@ func (m *manager) rotateCerts() (bool, error) {
// is a remainder after the old design using raw watch wrapped with backoff.
crtPEM, err := csr.WaitForCertificate(ctx, client, req)
if err != nil {
- utilruntime.HandleError(fmt.Errorf("Certificate request was not signed: %v", err))
+ utilruntime.HandleError(fmt.Errorf("certificate request was not signed: %v", err))
return false, nil
}

@@ -981,7 +981,7 @@ func (c fakeClient) Create(*certificates.CertificateSigningRequest) (*certificat
if c.err != nil {
return nil, c.err
}
- return nil, fmt.Errorf("Create error")
+ return nil, fmt.Errorf("create error")
}
csrReply := certificates.CertificateSigningRequest{}
csrReply.UID = "fake-uid"

@@ -993,7 +993,7 @@ func (c fakeClient) Watch(opts v1.ListOptions) (watch.Interface, error) {
if c.err != nil {
return nil, c.err
}
- return nil, fmt.Errorf("Watch error")
+ return nil, fmt.Errorf("watch error")
}
return &fakeWatch{
failureType: c.failureType,

@@ -122,7 +122,7 @@ func (t *awsElasticBlockStoreCSITranslator) TranslateCSIPVToInTree(pv *v1.Persis
if partition, ok := csiSource.VolumeAttributes["partition"]; ok {
partValue, err := strconv.Atoi(partition)
if err != nil {
- return nil, fmt.Errorf("Failed to convert partition %v to integer: %v", partition, err)
+ return nil, fmt.Errorf("failed to convert partition %v to integer: %v", partition, err)
}
ebsSource.Partition = int32(partValue)
}

@@ -149,7 +149,7 @@ func GetInTreeNameFromCSIName(pluginName string) (string, error) {
if plugin, ok := inTreePlugins[pluginName]; ok {
return plugin.GetInTreePluginName(), nil
}
- return "", fmt.Errorf("Could not find In-Tree driver name for CSI plugin %v", pluginName)
+ return "", fmt.Errorf("could not find In-Tree driver name for CSI plugin %v", pluginName)
}

// IsPVMigratable tests whether there is migration logic for the given Persistent Volume

@@ -2182,13 +2182,13 @@ func wrapAttachError(err error, disk *awsDisk, instance string) error {
if disk.awsID != EBSVolumeID(aws.StringValue(a.VolumeId)) {
klog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId))
} else if aws.StringValue(a.State) == "attached" {
- return fmt.Errorf("Error attaching EBS volume %q to instance %q: %q. The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId))
+ return fmt.Errorf("error attaching EBS volume %q to instance %q: %q. The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId))
}
}
}
}
}
- return fmt.Errorf("Error attaching EBS volume %q to instance %q: %q", disk.awsID, instance, err)
+ return fmt.Errorf("error attaching EBS volume %q to instance %q: %q", disk.awsID, instance, err)
}

// AttachDisk implements Volumes.AttachDisk

@@ -2775,7 +2775,7 @@ func (c *Cloud) describeLoadBalancerv2(name string) (*elbv2.LoadBalancer, error)
return nil, nil
}
}
- return nil, fmt.Errorf("Error describing load balancer: %q", err)
+ return nil, fmt.Errorf("error describing load balancer: %q", err)
}

// AWS will not return 2 load balancers with the same name _and_ type.

@@ -2792,7 +2792,7 @@ func (c *Cloud) describeLoadBalancerv2(name string) (*elbv2.LoadBalancer, error)
func (c *Cloud) findVPCID() (string, error) {
macs, err := c.metadata.GetMetadata("network/interfaces/macs/")
if err != nil {
- return "", fmt.Errorf("Could not list interfaces of the instance: %q", err)
+ return "", fmt.Errorf("could not list interfaces of the instance: %q", err)
}

// loop over interfaces, first vpc id returned wins

@@ -2807,7 +2807,7 @@ func (c *Cloud) findVPCID() (string, error) {
}
return vpcID, nil
}
- return "", fmt.Errorf("Could not find VPC ID in instance metadata")
+ return "", fmt.Errorf("could not find VPC ID in instance metadata")
}

// Retrieves the specified security group from the AWS API, or returns nil if not found

@@ -3352,7 +3352,7 @@ func isSubnetPublic(rt []*ec2.RouteTable, subnetID string) (bool, error) {
}

if subnetTable == nil {
- return false, fmt.Errorf("Could not locate routing table for subnet %s", subnetID)
+ return false, fmt.Errorf("could not locate routing table for subnet %s", subnetID)
}

for _, route := range subnetTable.Routes {

@@ -4032,7 +4032,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer
loadBalancerSecurityGroupID = *securityGroup
}
if loadBalancerSecurityGroupID == "" {
- return fmt.Errorf("Could not determine security group for load balancer: %s", aws.StringValue(lb.LoadBalancerName))
+ return fmt.Errorf("could not determine security group for load balancer: %s", aws.StringValue(lb.LoadBalancerName))
}

// Get the actual list of groups that allow ingress from the load-balancer

@@ -4174,14 +4174,14 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin
&elbv2.DescribeTargetGroupsInput{LoadBalancerArn: lb.LoadBalancerArn},
)
if err != nil {
- return fmt.Errorf("Error listing target groups before deleting load balancer: %q", err)
+ return fmt.Errorf("error listing target groups before deleting load balancer: %q", err)
}

_, err = c.elbv2.DeleteLoadBalancer(
&elbv2.DeleteLoadBalancerInput{LoadBalancerArn: lb.LoadBalancerArn},
)
if err != nil {
- return fmt.Errorf("Error deleting load balancer %q: %v", loadBalancerName, err)
+ return fmt.Errorf("error deleting load balancer %q: %v", loadBalancerName, err)
}

for _, group := range targetGroups.TargetGroups {

@@ -4189,7 +4189,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin
&elbv2.DeleteTargetGroupInput{TargetGroupArn: group.TargetGroupArn},
)
if err != nil {
- return fmt.Errorf("Error deleting target groups after deleting load balancer: %q", err)
+ return fmt.Errorf("error deleting target groups after deleting load balancer: %q", err)
}
}
}

@@ -140,7 +140,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa
if eipList, present := annotations[ServiceAnnotationLoadBalancerEIPAllocations]; present {
allocationIDs = strings.Split(eipList, ",")
if len(allocationIDs) != len(subnetIDs) {
- return nil, fmt.Errorf("Error creating load balancer: Must have same number of EIP AllocationIDs (%d) and SubnetIDs (%d)", len(allocationIDs), len(subnetIDs))
+ return nil, fmt.Errorf("error creating load balancer: Must have same number of EIP AllocationIDs (%d) and SubnetIDs (%d)", len(allocationIDs), len(subnetIDs))
}
}

@@ -157,7 +157,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa
klog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName)
createResponse, err := c.elbv2.CreateLoadBalancer(createRequest)
if err != nil {
- return nil, fmt.Errorf("Error creating load balancer: %q", err)
+ return nil, fmt.Errorf("error creating load balancer: %q", err)
}

loadBalancer = createResponse.LoadBalancers[0]

@@ -166,7 +166,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa
// duplicate target groups where the backend port is the same
_, err := c.createListenerV2(createResponse.LoadBalancers[0].LoadBalancerArn, mappings[i], namespacedName, instanceIDs, *createResponse.LoadBalancers[0].VpcId, tags)
if err != nil {
- return nil, fmt.Errorf("Error creating listener: %q", err)
+ return nil, fmt.Errorf("error creating listener: %q", err)
}
}
} else {

@@ -180,7 +180,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa
},
)
if err != nil {
- return nil, fmt.Errorf("Error describing listeners: %q", err)
+ return nil, fmt.Errorf("error describing listeners: %q", err)
}

// actual maps FrontendPort to an elbv2.Listener

@@ -195,7 +195,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa
},
)
if err != nil {
- return nil, fmt.Errorf("Error listing target groups: %q", err)
+ return nil, fmt.Errorf("error listing target groups: %q", err)
}

nodePortTargetGroup := map[int64]*elbv2.TargetGroup{}

@@ -277,7 +277,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa
}
}
if _, err := c.elbv2.ModifyListener(modifyListenerInput); err != nil {
- return nil, fmt.Errorf("Error updating load balancer listener: %q", err)
+ return nil, fmt.Errorf("error updating load balancer listener: %q", err)
}
}

@@ -286,7 +286,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa
if _, err := c.elbv2.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{
TargetGroupArn: listener.DefaultActions[0].TargetGroupArn,
}); err != nil {
- return nil, fmt.Errorf("Error deleting old target group: %q", err)
+ return nil, fmt.Errorf("error deleting old target group: %q", err)
}
} else {
// Run ensureTargetGroup to make sure instances in service are up-to-date

@@ -345,7 +345,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa
},
)
if err != nil {
- return nil, fmt.Errorf("Error retrieving load balancer after update: %q", err)
+ return nil, fmt.Errorf("error retrieving load balancer after update: %q", err)
}
loadBalancer = loadBalancers.LoadBalancers[0]
}

@@ -502,7 +502,7 @@ func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping
klog.Infof("Creating load balancer listener for %v", namespacedName)
createListenerOutput, err := c.elbv2.CreateListener(createListernerInput)
if err != nil {
- return nil, fmt.Errorf("Error creating load balancer listener: %q", err)
+ return nil, fmt.Errorf("error creating load balancer listener: %q", err)
}
return createListenerOutput.Listeners[0], nil
}

@@ -511,11 +511,11 @@ func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping
func (c *Cloud) deleteListenerV2(listener *elbv2.Listener) error {
_, err := c.elbv2.DeleteListener(&elbv2.DeleteListenerInput{ListenerArn: listener.ListenerArn})
if err != nil {
- return fmt.Errorf("Error deleting load balancer listener: %q", err)
+ return fmt.Errorf("error deleting load balancer listener: %q", err)
}
_, err = c.elbv2.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{TargetGroupArn: listener.DefaultActions[0].TargetGroupArn})
if err != nil {
- return fmt.Errorf("Error deleting load balancer target group: %q", err)
+ return fmt.Errorf("error deleting load balancer target group: %q", err)
}
return nil
}

@@ -552,10 +552,10 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty

result, err := c.elbv2.CreateTargetGroup(input)
if err != nil {
- return nil, fmt.Errorf("Error creating load balancer target group: %q", err)
+ return nil, fmt.Errorf("error creating load balancer target group: %q", err)
}
if len(result.TargetGroups) != 1 {
- return nil, fmt.Errorf("Expected only one target group on CreateTargetGroup, got %d groups", len(result.TargetGroups))
+ return nil, fmt.Errorf("expected only one target group on CreateTargetGroup, got %d groups", len(result.TargetGroups))
}

if len(tags) != 0 {

@@ -587,7 +587,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty

_, err = c.elbv2.RegisterTargets(registerInput)
if err != nil {
- return nil, fmt.Errorf("Error registering targets for load balancer: %q", err)
+ return nil, fmt.Errorf("error registering targets for load balancer: %q", err)
}

return result.TargetGroups[0], nil

@@ -597,7 +597,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty
{
healthResponse, err := c.elbv2.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{TargetGroupArn: targetGroup.TargetGroupArn})
if err != nil {
- return nil, fmt.Errorf("Error describing target group health: %q", err)
+ return nil, fmt.Errorf("error describing target group health: %q", err)
}
actualIDs := []string{}
for _, healthDescription := range healthResponse.TargetHealthDescriptions {

@@ -631,7 +631,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty
}
_, err := c.elbv2.RegisterTargets(registerInput)
if err != nil {
- return nil, fmt.Errorf("Error registering new targets in target group: %q", err)
+ return nil, fmt.Errorf("error registering new targets in target group: %q", err)
}
dirty = true
}

@@ -649,7 +649,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty
}
_, err := c.elbv2.DeregisterTargets(deregisterInput)
if err != nil {
- return nil, fmt.Errorf("Error trying to deregister targets in target group: %q", err)
+ return nil, fmt.Errorf("error trying to deregister targets in target group: %q", err)
}
dirty = true
}

@@ -679,7 +679,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty
if dirtyHealthCheck {
_, err := c.elbv2.ModifyTargetGroup(input)
if err != nil {
- return nil, fmt.Errorf("Error modifying target group health check: %q", err)
+ return nil, fmt.Errorf("error modifying target group health check: %q", err)
}

dirty = true

@@ -691,7 +691,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty
TargetGroupArns: []*string{targetGroup.TargetGroupArn},
})
if err != nil {
- return nil, fmt.Errorf("Error retrieving target group after creation/update: %q", err)
+ return nil, fmt.Errorf("error retrieving target group after creation/update: %q", err)
}
targetGroup = result.TargetGroups[0]
}

@@ -704,10 +704,10 @@ func (c *Cloud) getVpcCidrBlocks() ([]string, error) {
VpcIds: []*string{aws.String(c.vpcID)},
})
if err != nil {
- return nil, fmt.Errorf("Error querying VPC for ELB: %q", err)
+ return nil, fmt.Errorf("error querying VPC for ELB: %q", err)
}
if len(vpcs.Vpcs) != 1 {
- return nil, fmt.Errorf("Error querying VPC for ELB, got %d vpcs for %s", len(vpcs.Vpcs), c.vpcID)
+ return nil, fmt.Errorf("error querying VPC for ELB, got %d vpcs for %s", len(vpcs.Vpcs), c.vpcID)
}

cidrBlocks := make([]string, 0, len(vpcs.Vpcs[0].CidrBlockAssociationSet))

@@ -65,7 +65,7 @@ type awsTagging struct {
func (t *awsTagging) init(legacyClusterID string, clusterID string) error {
if legacyClusterID != "" {
if clusterID != "" && legacyClusterID != clusterID {
- return fmt.Errorf("ClusterID tags did not match: %q vs %q", clusterID, legacyClusterID)
+ return fmt.Errorf("clusterID tags did not match: %q vs %q", clusterID, legacyClusterID)
}
t.usesLegacyTags = true
clusterID = legacyClusterID

@@ -101,7 +101,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type
// This should never happen but if it does it could mean there was a race and instance
// has been deleted
if err != nil {
- fetchErr := fmt.Errorf("Error fetching instance %s for volume %s", instanceID, diskName)
+ fetchErr := fmt.Errorf("error fetching instance %s for volume %s", instanceID, diskName)
klog.Warning(fetchErr)
return awsDiskInfo, false, fetchErr
}

@@ -68,7 +68,7 @@ func (az *Cloud) getConfigFromSecret() (*Config, error) {

secret, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Get(cloudConfigSecretName, metav1.GetOptions{})
if err != nil {
- return nil, fmt.Errorf("Failed to get secret %s: %v", cloudConfigSecretName, err)
+ return nil, fmt.Errorf("failed to get secret %s: %v", cloudConfigSecretName, err)
}

cloudConfigData, ok := secret.Data[cloudConfigKey]

@@ -84,7 +84,7 @@ func (az *Cloud) getConfigFromSecret() (*Config, error) {

err = yaml.Unmarshal(cloudConfigData, &config)
if err != nil {
- return nil, fmt.Errorf("Failed to parse Azure cloud-config: %v", err)
+ return nil, fmt.Errorf("failed to parse Azure cloud-config: %v", err)
}

return &config, nil

@@ -186,7 +186,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N
return *disk.Lun, nil
}
}
- return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
+ return -1, fmt.Errorf("cannot find Lun for disk %s", diskName)
}

// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
@@ -36,6 +36,10 @@ import (
"k8s.io/klog"
)

+ const (
+ errStrLbNoHosts = "cannot EnsureLoadBalancer() with no hosts"
+ )
+
// ensureExternalLoadBalancer is the external implementation of LoadBalancer.EnsureLoadBalancer.
// Our load balancers in GCE consist of four separate GCE resources - a static
// IP address, a firewall rule, a target pool, and a forwarding rule. This

@@ -46,7 +50,7 @@ import (
// each is needed.
func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, apiService *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
if len(nodes) == 0 {
- return nil, fmt.Errorf("Cannot EnsureLoadBalancer() with no hosts")
+ return nil, fmt.Errorf(errStrLbNoHosts)
}

hostNames := nodeNames(nodes)
@@ -508,7 +512,7 @@ func (g *Cloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation bool,
klog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts))
if hcToCreate != nil {
if hc, err := g.ensureHTTPHealthCheck(hcToCreate.Name, hcToCreate.RequestPath, int32(hcToCreate.Port)); err != nil || hc == nil {
- return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", loadBalancerName, hcToCreate.Port, hcToCreate.RequestPath, err)
+ return fmt.Errorf("failed to ensure health check for %v port %d path %v: %v", loadBalancerName, hcToCreate.Port, hcToCreate.RequestPath, err)
}
}
} else {

@@ -538,7 +542,7 @@ func (g *Cloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, serviceNam
var err error
hcRequestPath, hcPort := hc.RequestPath, hc.Port
if hc, err = g.ensureHTTPHealthCheck(hc.Name, hc.RequestPath, int32(hc.Port)); err != nil || hc == nil {
- return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", name, hcPort, hcRequestPath, err)
+ return fmt.Errorf("failed to ensure health check for %v port %d path %v: %v", name, hcPort, hcRequestPath, err)
}
hcLinks = append(hcLinks, hc.SelfLink)
}

@@ -607,7 +611,7 @@ func (g *Cloud) updateTargetPool(loadBalancerName string, hosts []*gceInstance)
if len(updatedPool.Instances) != len(hosts) {
klog.Errorf("Unexpected number of instances (%d) in target pool %s after updating (expected %d). Instances in updated pool: %s",
len(updatedPool.Instances), loadBalancerName, len(hosts), strings.Join(updatedPool.Instances, ","))
- return fmt.Errorf("Unexpected number of instances (%d) in target pool %s after update (expected %d)", len(updatedPool.Instances), loadBalancerName, len(hosts))
+ return fmt.Errorf("unexpected number of instances (%d) in target pool %s after update (expected %d)", len(updatedPool.Instances), loadBalancerName, len(hosts))
}
return nil
}

@@ -790,7 +794,7 @@ func loadBalancerPortRange(ports []v1.ServicePort) (string, error) {

// The service controller verified all the protocols match on the ports, just check and use the first one
if ports[0].Protocol != v1.ProtocolTCP && ports[0].Protocol != v1.ProtocolUDP {
- return "", fmt.Errorf("Invalid protocol %s, only TCP and UDP are supported", string(ports[0].Protocol))
+ return "", fmt.Errorf("invalid protocol %s, only TCP and UDP are supported", string(ports[0].Protocol))
}

minPort := int32(65536)
@@ -43,7 +43,6 @@ const (
eventReasonManualChange = "LoadBalancerManualChange"
eventMsgFirewallChange = "Firewall change required by network admin"
errPrefixGetTargetPool = "error getting load balancer's target pool:"
- errStrLbNoHosts = "Cannot EnsureLoadBalancer() with no hosts"
wrongTier = "SupremeLuxury"
errStrUnsupportedTier = "unsupported network tier: \"" + wrongTier + "\""
)
@@ -84,7 +84,7 @@ func (secretCredentialManager *SecretCredentialManager) GetCredential(server str

func (secretCredentialManager *SecretCredentialManager) updateCredentialsMap() error {
if secretCredentialManager.SecretLister == nil {
- return fmt.Errorf("SecretLister is not initialized")
+ return fmt.Errorf("secretLister is not initialized")
}
secret, err := secretCredentialManager.SecretLister.Secrets(secretCredentialManager.SecretNamespace).Get(secretCredentialManager.SecretName)
if err != nil {

@@ -44,27 +44,27 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
}
pbmClient, err := vclib.NewPbmClient(ctx, datastore.Client())
if err != nil {
- klog.Errorf("Error occurred while creating new pbmClient, err: %+v", err)
+ klog.Errorf("error occurred while creating new pbmClient, err: %+v", err)
return "", err
}

if vmdisk.volumeOptions.StoragePolicyID == "" && vmdisk.volumeOptions.StoragePolicyName != "" {
vmdisk.volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, vmdisk.volumeOptions.StoragePolicyName)
if err != nil {
- klog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err)
+ klog.Errorf("error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err)
return "", err
}
}
if vmdisk.volumeOptions.StoragePolicyID != "" {
compatible, faultMessage, err := datastore.IsCompatibleWithStoragePolicy(ctx, vmdisk.volumeOptions.StoragePolicyID)
if err != nil {
- klog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err)
+ klog.Errorf("error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err)
return "", err
}

if !compatible {
klog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName)
- return "", fmt.Errorf("User specified datastore is not compatible with the storagePolicy: %q. Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage)
+ return "", fmt.Errorf("user specified datastore is not compatible with the storagePolicy: %q. Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage)
}
}

@@ -81,9 +81,9 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
}
if dsType != vclib.VSANDatastoreType {
klog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name())
- return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+
- " The policy parameters will work only with VSAN Datastore."+
- " So, please specify a valid VSAN datastore in Storage class definition.", datastore.Name())
+ return "", fmt.Errorf("the specified datastore: %q is not a VSAN datastore."+
+ " the policy parameters will work only with VSAN Datastore."+
+ " so, please specify a valid VSAN datastore in Storage class definition", datastore.Name())
}
storageProfileSpec.ProfileId = ""
storageProfileSpec.ProfileData = &types.VirtualMachineProfileRawData{

@@ -92,7 +92,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
}
} else {
klog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
- return "", fmt.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
+ return "", fmt.Errorf("both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
}
var dummyVM *vclib.VirtualMachine
// Check if VM already exist in the folder.

@@ -106,7 +106,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
klog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName)
dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName)
if err != nil {
- klog.Errorf("Failed to create Dummy VM. err: %v", err)
+ klog.Errorf("failed to create Dummy VM. err: %v", err)
return "", err
}
}

@@ -115,7 +115,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
disk, _, err := dummyVM.CreateDiskSpec(ctx, vmdisk.diskPath, datastore, vmdisk.volumeOptions)
if err != nil {
- klog.Errorf("Failed to create Disk Spec. err: %v", err)
+ klog.Errorf("failed to create Disk Spec. err: %v", err)
return "", err
}
deviceConfigSpec := &types.VirtualDeviceConfigSpec{

@@ -220,7 +220,7 @@ func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder) error {
}
if vmList == nil || len(vmList) == 0 {
klog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
- return fmt.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
+ return fmt.Errorf("no virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
}
var dummyVMList []*vclib.VirtualMachine
// Loop through VM's in the Kubernetes cluster to find dummy VM's

@@ -152,7 +152,7 @@ func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
klog.Errorf("Failed to parse volPath: %s", vmDiskPath)
- return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
+ return nil, fmt.Errorf("failed to parse volPath: %s", vmDiskPath)
}
return datastorePathObj, nil
}

@@ -291,7 +291,7 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d
if err := vm.deleteController(ctx, newSCSIController, vmDevices); err != nil {
return nil, nil, fmt.Errorf("failed to delete SCSI controller after failing to find it on VM: %v", err)
}
- return nil, nil, fmt.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
+ return nil, nil, fmt.Errorf("cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
}
}
disk := vmDevices.CreateDisk(scsiController, dsObj.Reference(), diskPath)

@@ -1322,14 +1322,14 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo

// if datastoreInfo is still not determined, it is an error condition
if datastoreInfo == nil {
- klog.Errorf("Ambigous datastore name %s, cannot be found among: %v", datastoreName, candidateDatastoreInfos)
- return "", fmt.Errorf("Ambigous datastore name %s", datastoreName)
+ klog.Errorf("ambigous datastore name %s, cannot be found among: %v", datastoreName, candidateDatastoreInfos)
+ return "", fmt.Errorf("ambigous datastore name %s", datastoreName)
}
ds := datastoreInfo.Datastore
volumeOptions.Datastore = datastoreInfo.Info.Name
vmOptions, err = vs.setVMOptions(ctx, vsi.conn, ds)
if err != nil {
- klog.Errorf("Failed to set VM options required to create a vsphere volume. err: %+v", err)
+ klog.Errorf("failed to set VM options required to create a vsphere volume. err: %+v", err)
return "", err
}
kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"

@@ -1605,7 +1605,7 @@ func (vs *VSphere) GetVolumeLabels(volumePath string) (map[string]string, error)
}
if datastore == nil {
klog.Errorf("Could not find %s among %v", volumePath, dsInfos)
- return nil, fmt.Errorf("Could not find the datastore for volume: %s", volumePath)
+ return nil, fmt.Errorf("could not find the datastore for volume: %s", volumePath)
}

dsZones, err := vs.GetZonesForDatastore(ctx, datastore)

@@ -279,7 +279,7 @@ func getDatastoresForZone(ctx context.Context, nodeManager *NodeManager, selecte
}
}
if dcMoref == nil {
- return nil, fmt.Errorf("Failed to find the Datacenter of host %s", host)
+ return nil, fmt.Errorf("failed to find the Datacenter of host %s", host)
}

dc := object.NewDatacenter(host.Client(), *dcMoref)

@@ -658,13 +658,13 @@ func GetVMUUID() (string, error) {
uuid := strings.TrimSpace(uuidFromFile)
// check the uuid starts with "VMware-"
if !strings.HasPrefix(uuid, UUIDPrefix) {
- return "", fmt.Errorf("Failed to match Prefix, UUID read from the file is %v", uuidFromFile)
+ return "", fmt.Errorf("failed to match Prefix, UUID read from the file is %v", uuidFromFile)
}
// Strip the prefix and white spaces and -
uuid = strings.Replace(uuid[len(UUIDPrefix):(len(uuid))], " ", "", -1)
uuid = strings.Replace(uuid, "-", "", -1)
if len(uuid) != 32 {
- return "", fmt.Errorf("Length check failed, UUID read from the file is %v", uuidFromFile)
+ return "", fmt.Errorf("length check failed, UUID read from the file is %v", uuidFromFile)
}
// need to add dashes, e.g. "564d395e-d807-e18a-cb25-b79f65eb2b9f"
uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32])

@@ -86,7 +86,7 @@ run_client_config_tests() {
# test invalid config
kubectl config view | sed -E "s/apiVersion: .*/apiVersion: v-1/g" > "${TMPDIR:-/tmp}"/newconfig.yaml
output_message=$(! "${KUBE_OUTPUT_HOSTBIN}/kubectl" get pods --context="" --user="" --kubeconfig="${TMPDIR:-/tmp}"/newconfig.yaml 2>&1)
- kube::test::if_has_string "${output_message}" "Error loading config file"
+ kube::test::if_has_string "${output_message}" "error loading config file"

output_message=$(! kubectl get pod --kubeconfig=missing-config 2>&1)
kube::test::if_has_string "${output_message}" 'no such file or directory'

@@ -377,13 +377,13 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
gomega.Eventually(func() error {
channel, msg, err := wsRead(ws)
if err != nil {
- return fmt.Errorf("Failed to read completely from websocket %s: %v", url.String(), err)
+ return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err)
}
if channel != 0 {
- return fmt.Errorf("Got message from server that didn't start with channel 0 (data): %v", msg)
+ return fmt.Errorf("got message from server that didn't start with channel 0 (data): %v", msg)
}
if p := binary.LittleEndian.Uint16(msg); p != 80 {
- return fmt.Errorf("Received the wrong port: %d", p)
+ return fmt.Errorf("received the wrong port: %d", p)
}
return nil
}, time.Minute, 10*time.Second).Should(gomega.BeNil())

@@ -391,13 +391,13 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
gomega.Eventually(func() error {
channel, msg, err := wsRead(ws)
if err != nil {
- return fmt.Errorf("Failed to read completely from websocket %s: %v", url.String(), err)
+ return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err)
}
if channel != 1 {
- return fmt.Errorf("Got message from server that didn't start with channel 1 (error): %v", msg)
+ return fmt.Errorf("got message from server that didn't start with channel 1 (error): %v", msg)
}
if p := binary.LittleEndian.Uint16(msg); p != 80 {
- return fmt.Errorf("Received the wrong port: %d", p)
+ return fmt.Errorf("received the wrong port: %d", p)
}
return nil
}, time.Minute, 10*time.Second).Should(gomega.BeNil())

@@ -414,14 +414,14 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
gomega.Eventually(func() error {
channel, msg, err := wsRead(ws)
if err != nil {
- return fmt.Errorf("Failed to read completely from websocket %s: %v", url.String(), err)
+ return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err)
}
if channel != 0 {
- return fmt.Errorf("Got message from server that didn't start with channel 0 (data): %v", msg)
+ return fmt.Errorf("got message from server that didn't start with channel 0 (data): %v", msg)
}
buf.Write(msg)
if bytes.Equal(expectedData, buf.Bytes()) {
- return fmt.Errorf("Expected %q from server, got %q", expectedData, buf.Bytes())
+ return fmt.Errorf("expected %q from server, got %q", expectedData, buf.Bytes())
}
return nil
}, time.Minute, 10*time.Second).Should(gomega.BeNil())
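Violations of this convention can also be caught mechanically. The sketch below is a rough, hypothetical checker in the spirit of the golint rule the commit addresses; it only inspects calls named Errorf or New whose first argument is a string literal, and it is not a tool shipped in this repository:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"os"
	"strconv"
	"unicode"
)

// A rough sketch, not a tool from this repository: walk the AST of each file
// given on the command line and flag string literals passed as the first
// argument of any Errorf or New call when they start with an upper-case letter.
func main() {
	fset := token.NewFileSet()
	for _, path := range os.Args[1:] {
		file, err := parser.ParseFile(fset, path, nil, 0)
		if err != nil {
			fmt.Fprintln(os.Stderr, "parse error:", err)
			continue
		}
		ast.Inspect(file, func(n ast.Node) bool {
			call, ok := n.(*ast.CallExpr)
			if !ok || len(call.Args) == 0 {
				return true
			}
			sel, ok := call.Fun.(*ast.SelectorExpr)
			if !ok || (sel.Sel.Name != "Errorf" && sel.Sel.Name != "New") {
				return true
			}
			lit, ok := call.Args[0].(*ast.BasicLit)
			if !ok || lit.Kind != token.STRING {
				return true
			}
			msg, err := strconv.Unquote(lit.Value)
			if err != nil || msg == "" {
				return true
			}
			if unicode.IsUpper([]rune(msg)[0]) {
				fmt.Printf("%s: error string should not be capitalized: %q\n", fset.Position(lit.Pos()), msg)
			}
			return true
		})
	}
}

Invoked as "go run capcheck.go file1.go file2.go ..." (the file name is arbitrary), it prints one line per capitalized literal it finds; it deliberately ignores dynamic format strings and does not distinguish fmt.Errorf from other functions named Errorf.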