Merge pull request #81855 from oomichi/replace-e2elog-framework-perf-replicaset

Use log functions of core framework on sub p*
Kubernetes Prow Robot 2019-08-26 10:20:24 -07:00 committed by GitHub
commit 52f708dc28
10 changed files with 57 additions and 67 deletions
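
Every hunk below follows the same mechanical pattern: the e2elog alias import of test/e2e/framework/log is dropped (along with the matching BUILD dependency), and each e2elog.Logf / e2elog.Failf call becomes framework.Logf / framework.Failf. Both packages expose printf-style helpers with identical signatures, so only the package qualifier changes. A minimal, self-contained sketch of the call shape — the logf helper here is a hypothetical stand-in for illustration, not code from this PR:

package main

import (
	"fmt"
	"time"
)

// logf stands in for the shared printf-style helper; the real Logf lives in
// k8s.io/kubernetes/test/e2e/framework (and, before this PR, callers reached
// an equivalent copy through the e2elog alias for framework/log).
func logf(format string, args ...interface{}) {
	fmt.Printf("%s: INFO: %s\n", time.Now().Format(time.StampMilli), fmt.Sprintf(format, args...))
}

func main() {
	pdName := "pd-example"                     // hypothetical disk name
	err := fmt.Errorf("volume still attached") // hypothetical error
	// Before: e2elog.Logf("error deleting PD %q: %v", pdName, err)
	// After:  framework.Logf("error deleting PD %q: %v", pdName, err)
	// The format string and arguments are untouched; only the qualifier changes.
	logf("error deleting PD %q: %v", pdName, err)
}

Because the signatures match, the rewrite is a pure find-and-replace of the package qualifier, which is why the diff touches many files but changes no behavior.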

View File

@@ -9,7 +9,6 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/aws:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",

View File

@@ -28,7 +28,6 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
awscloud "k8s.io/legacy-cloud-providers/aws"
)
@@ -123,7 +122,7 @@ func (p *Provider) DeletePD(pdName string) error {
_, err := client.DeleteVolume(request)
if err != nil {
if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
-e2elog.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
+framework.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
} else {
return fmt.Errorf("error deleting EBS volumes: %v", err)
}
@@ -153,7 +152,7 @@ func newAWSClient(zone string) *ec2.EC2 {
zone = framework.TestContext.CloudConfig.Zone
}
if zone == "" {
-e2elog.Logf("Warning: No AWS zone configured!")
+framework.Logf("Warning: No AWS zone configured!")
cfg = nil
} else {
region := zone[:len(zone)-1]
@@ -161,7 +160,7 @@ func newAWSClient(zone string) *ec2.EC2 {
}
session, err := session.NewSession()
if err != nil {
-e2elog.Logf("Warning: failed to create aws session")
+framework.Logf("Warning: failed to create aws session")
}
return ec2.New(session, cfg)
}

View File

@@ -10,7 +10,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/azure:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
],
)

View File

@@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/legacy-cloud-providers/azure"
)
@@ -38,7 +37,7 @@ func newProvider() (framework.ProviderInterface, error) {
}
config, err := os.Open(framework.TestContext.CloudConfig.ConfigFile)
if err != nil {
-e2elog.Logf("Couldn't open cloud provider configuration %s: %#v",
+framework.Logf("Couldn't open cloud provider configuration %s: %#v",
framework.TestContext.CloudConfig.ConfigFile, err)
}
defer config.Close()
@@ -73,7 +72,7 @@ func (p *Provider) CreatePD(zone string) (string, error) {
// DeletePD deletes a persistent volume
func (p *Provider) DeletePD(pdName string) error {
if err := p.azureCloud.DeleteVolume(pdName); err != nil {
-e2elog.Logf("failed to delete Azure volume %q: %v", pdName, err)
+framework.Logf("failed to delete Azure volume %q: %v", pdName, err)
return err
}
return nil

View File

@@ -23,7 +23,6 @@ go_library(
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/service:go_default_library",

View File

@@ -30,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
gcecloud "k8s.io/legacy-cloud-providers/gce"
)
@@ -43,7 +42,7 @@ func MakeFirewallNameForLBService(name string) string {
// ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall {
if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
-e2elog.Failf("can not construct firewall rule for non-loadbalancer type service")
+framework.Failf("can not construct firewall rule for non-loadbalancer type service")
}
fw := compute.Firewall{}
fw.Name = MakeFirewallNameForLBService(cloudprovider.DefaultLoadBalancerName(svc))
@@ -71,7 +70,7 @@ func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHeal
// ConstructHealthCheckFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall {
if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
-e2elog.Failf("can not construct firewall rule for non-loadbalancer type service")
+framework.Failf("can not construct firewall rule for non-loadbalancer type service")
}
fw := compute.Firewall{}
fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), isNodesHealthCheck)
@@ -396,7 +395,7 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
// WaitForFirewallRule waits for the specified firewall existence
func WaitForFirewallRule(gceCloud *gcecloud.Cloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) {
-e2elog.Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist)
+framework.Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist)
var fw *compute.Firewall
var err error

View File

@@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
gcecloud "k8s.io/legacy-cloud-providers/gce"
)
@@ -43,7 +42,7 @@ func init() {
}
func factory() (framework.ProviderInterface, error) {
-e2elog.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider)
+framework.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider)
zone := framework.TestContext.CloudConfig.Zone
region := framework.TestContext.CloudConfig.Region
@@ -177,7 +176,7 @@ func (p *Provider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) erro
}
for _, item := range list.Items {
if item.PortRange == portRange && item.IPAddress == ip {
-e2elog.Logf("found a load balancer: %v", item)
+framework.Logf("found a load balancer: %v", item)
return false, nil
}
}
@@ -231,7 +230,7 @@ func (p *Provider) DeletePD(pdName string) error {
return nil
}
-e2elog.Logf("error deleting PD %q: %v", pdName, err)
+framework.Logf("error deleting PD %q: %v", pdName, err)
}
return err
}
@@ -258,12 +257,12 @@ func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
if pollErr := wait.Poll(5*time.Second, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) {
if err := p.cleanupGCEResources(c, loadBalancerName, region, zone); err != nil {
-e2elog.Logf("Still waiting for glbc to cleanup: %v", err)
+framework.Logf("Still waiting for glbc to cleanup: %v", err)
return false, nil
}
return true, nil
}); pollErr != nil {
-e2elog.Failf("Failed to cleanup service GCE resources.")
+framework.Failf("Failed to cleanup service GCE resources.")
}
}
@@ -333,7 +332,7 @@ func GetInstanceTags(cloudConfig framework.CloudConfig, instanceName string) *co
res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
instanceName).Do()
if err != nil {
-e2elog.Failf("Failed to get instance tags for %v: %v", instanceName, err)
+framework.Failf("Failed to get instance tags for %v: %v", instanceName, err)
}
return res.Tags
}
@@ -347,9 +346,9 @@ func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone strin
cloudConfig.ProjectID, zone, instanceName,
&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
if err != nil {
-e2elog.Failf("failed to set instance tags: %v", err)
+framework.Failf("failed to set instance tags: %v", err)
}
-e2elog.Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
+framework.Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
return resTags.Items
}
@@ -357,7 +356,7 @@ func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone strin
func GetNodeTags(c clientset.Interface, cloudConfig framework.CloudConfig) []string {
nodes := framework.GetReadySchedulableNodesOrDie(c)
if len(nodes.Items) == 0 {
-e2elog.Logf("GetNodeTags: Found 0 node.")
+framework.Logf("GetNodeTags: Found 0 node.")
return []string{}
}
return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items

View File

@@ -34,7 +34,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
utilexec "k8s.io/utils/exec"
)
@@ -86,7 +85,7 @@ func (cont *IngressController) CleanupIngressController() error {
func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.Duration) error {
pollErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
if err := cont.Cleanup(false); err != nil {
-e2elog.Logf("Monitoring glbc's cleanup of gce resources:\n%v", err)
+framework.Logf("Monitoring glbc's cleanup of gce resources:\n%v", err)
return false, nil
}
return true, nil
@@ -107,7 +106,7 @@ func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.
// throw out confusing events.
if ipErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
if err := cont.deleteStaticIPs(); err != nil {
-e2elog.Logf("Failed to delete static-ip: %v\n", err)
+framework.Logf("Failed to delete static-ip: %v\n", err)
return false, nil
}
return true, nil
@@ -126,7 +125,7 @@ func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.
}
func (cont *IngressController) getL7AddonUID() (string, error) {
-e2elog.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap)
+framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap)
cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{})
if err != nil {
return "", err
@@ -295,7 +294,7 @@ func (cont *IngressController) deleteURLMap(del bool) (msg string) {
continue
}
if del {
-e2elog.Logf("Deleting url-map: %s", um.Name)
+framework.Logf("Deleting url-map: %s", um.Name)
if err := gceCloud.DeleteURLMap(um.Name); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete url map %v\n", um.Name)
@@ -331,7 +330,7 @@ func (cont *IngressController) deleteBackendService(del bool) (msg string) {
return fmt.Sprintf("Failed to list backend services: %v", err)
}
if len(beList) == 0 {
-e2elog.Logf("No backend services found")
+framework.Logf("No backend services found")
return msg
}
for _, be := range beList {
@@ -339,7 +338,7 @@
continue
}
if del {
-e2elog.Logf("Deleting backed-service: %s", be.Name)
+framework.Logf("Deleting backed-service: %s", be.Name)
if err := gceCloud.DeleteGlobalBackendService(be.Name); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete backend service %v: %v\n", be.Name, err)
@@ -368,7 +367,7 @@ func (cont *IngressController) deleteHTTPHealthCheck(del bool) (msg string) {
continue
}
if del {
-e2elog.Logf("Deleting http-health-check: %s", hc.Name)
+framework.Logf("Deleting http-health-check: %s", hc.Name)
if err := gceCloud.DeleteHTTPHealthCheck(hc.Name); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete HTTP health check %v\n", hc.Name)
@@ -409,7 +408,7 @@ func (cont *IngressController) deleteSSLCertificate(del bool) (msg string) {
continue
}
if del {
-e2elog.Logf("Deleting ssl-certificate: %s", s.Name)
+framework.Logf("Deleting ssl-certificate: %s", s.Name)
if err := gceCloud.DeleteSslCertificate(s.Name); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete ssl certificates: %v\n", s.Name)
@@ -455,7 +454,7 @@ func (cont *IngressController) deleteInstanceGroup(del bool) (msg string) {
continue
}
if del {
-e2elog.Logf("Deleting instance-group: %s", ig.Name)
+framework.Logf("Deleting instance-group: %s", ig.Name)
if err := gceCloud.DeleteInstanceGroup(ig.Name, cont.Cloud.Zone); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete instance group %v\n", ig.Name)
@@ -477,7 +476,7 @@ func (cont *IngressController) deleteNetworkEndpointGroup(del bool) (msg string)
return msg
}
// Do not return error as NEG is still alpha.
-e2elog.Logf("Failed to list network endpoint group: %v", err)
+framework.Logf("Failed to list network endpoint group: %v", err)
return msg
}
if len(negList) == 0 {
@@ -488,7 +487,7 @@ func (cont *IngressController) deleteNetworkEndpointGroup(del bool) (msg string)
continue
}
if del {
-e2elog.Logf("Deleting network-endpoint-group: %s", neg.Name)
+framework.Logf("Deleting network-endpoint-group: %s", neg.Name)
if err := gceCloud.DeleteNetworkEndpointGroup(neg.Name, cont.Cloud.Zone); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete network endpoint group %v\n", neg.Name)
@@ -556,11 +555,11 @@ func (cont *IngressController) canDeleteNEG(resourceName, creationTimestamp stri
func canDeleteWithTimestamp(resourceName, creationTimestamp string) bool {
createdTime, err := time.Parse(time.RFC3339, creationTimestamp)
if err != nil {
-e2elog.Logf("WARNING: Failed to parse creation timestamp %v for %v: %v", creationTimestamp, resourceName, err)
+framework.Logf("WARNING: Failed to parse creation timestamp %v for %v: %v", creationTimestamp, resourceName, err)
return false
}
if time.Since(createdTime) > maxAge {
-e2elog.Logf("%v created on %v IS too old", resourceName, creationTimestamp)
+framework.Logf("%v created on %v IS too old", resourceName, creationTimestamp)
return true
}
return false
@@ -619,7 +618,7 @@ func (cont *IngressController) WaitForNegBackendService(svcPorts map[string]v1.S
return wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
err := cont.verifyBackendMode(svcPorts, negBackend)
if err != nil {
-e2elog.Logf("Err while checking if backend service is using NEG: %v", err)
+framework.Logf("Err while checking if backend service is using NEG: %v", err)
return false, nil
}
return true, nil
@@ -631,7 +630,7 @@ func (cont *IngressController) WaitForIgBackendService(svcPorts map[string]v1.Se
return wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
err := cont.verifyBackendMode(svcPorts, igBackend)
if err != nil {
-e2elog.Logf("Err while checking if backend service is using IG: %v", err)
+framework.Logf("Err while checking if backend service is using IG: %v", err)
return false, nil
}
return true, nil
@@ -765,9 +764,9 @@ func (cont *IngressController) Init() error {
// There's a name limit imposed by GCE. The controller will truncate.
testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.Ns, cont.UID)
if len(testName) > nameLenLimit {
-e2elog.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
+framework.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
} else {
-e2elog.Logf("Detected cluster UID %v", cont.UID)
+framework.Logf("Detected cluster UID %v", cont.UID)
}
return nil
}
@@ -781,21 +780,21 @@ func (cont *IngressController) CreateStaticIP(name string) string {
if err := gceCloud.ReserveGlobalAddress(addr); err != nil {
if delErr := gceCloud.DeleteGlobalAddress(name); delErr != nil {
if cont.isHTTPErrorCode(delErr, http.StatusNotFound) {
-e2elog.Logf("Static ip with name %v was not allocated, nothing to delete", name)
+framework.Logf("Static ip with name %v was not allocated, nothing to delete", name)
} else {
-e2elog.Logf("Failed to delete static ip %v: %v", name, delErr)
+framework.Logf("Failed to delete static ip %v: %v", name, delErr)
}
}
-e2elog.Failf("Failed to allocate static ip %v: %v", name, err)
+framework.Failf("Failed to allocate static ip %v: %v", name, err)
}
ip, err := gceCloud.GetGlobalAddress(name)
if err != nil {
-e2elog.Failf("Failed to get newly created static ip %v: %v", name, err)
+framework.Failf("Failed to get newly created static ip %v: %v", name, err)
}
cont.staticIPName = ip.Name
-e2elog.Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address)
+framework.Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address)
return ip.Address
}
@@ -815,7 +814,7 @@ func (cont *IngressController) deleteStaticIPs() error {
for _, ip := range e2eIPs {
ips = append(ips, ip.Name)
}
-e2elog.Logf("None of the remaining %d static-ips were created by this e2e: %v", len(ips), strings.Join(ips, ", "))
+framework.Logf("None of the remaining %d static-ips were created by this e2e: %v", len(ips), strings.Join(ips, ", "))
}
return nil
}
@@ -841,32 +840,32 @@ func gcloudComputeResourceList(resource, regex, project string, out interface{})
errMsg = fmt.Sprintf("%v, stderr %v", errMsg, string(osExitErr.Stderr))
}
}
-e2elog.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d, msg: %v", strings.Join(command, " "), err, string(output), errCode, errMsg)
+framework.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d, msg: %v", strings.Join(command, " "), err, string(output), errCode, errMsg)
}
if err := json.Unmarshal([]byte(output), out); err != nil {
-e2elog.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
+framework.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
}
}
// GcloudComputeResourceDelete deletes the specified compute resource by name and project.
func GcloudComputeResourceDelete(resource, name, project string, args ...string) error {
-e2elog.Logf("Deleting %v: %v", resource, name)
+framework.Logf("Deleting %v: %v", resource, name)
argList := append([]string{"compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q"}, args...)
output, err := exec.Command("gcloud", argList...).CombinedOutput()
if err != nil {
-e2elog.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
+framework.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
}
return err
}
// GcloudComputeResourceCreate creates a compute resource with a name and arguments.
func GcloudComputeResourceCreate(resource, name, project string, args ...string) error {
-e2elog.Logf("Creating %v in project %v: %v", resource, project, name)
+framework.Logf("Creating %v in project %v: %v", resource, project, name)
argsList := append([]string{"compute", resource, "create", name, fmt.Sprintf("--project=%v", project)}, args...)
-e2elog.Logf("Running command: gcloud %+v", strings.Join(argsList, " "))
+framework.Logf("Running command: gcloud %+v", strings.Join(argsList, " "))
output, err := exec.Command("gcloud", argsList...).CombinedOutput()
if err != nil {
-e2elog.Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err)
+framework.Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err)
}
return err
}

View File

@@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@@ -55,7 +54,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err)
-e2elog.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))
+framework.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))
ps, err = testutils.NewPodStore(f.ClientSet, systemNamespace, labels.Everything(), fields.Everything())
framework.ExpectNoError(err)
@@ -67,7 +66,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
}
if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
-e2elog.Failf("At least one pod wasn't running and ready or succeeded at test start.")
+framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
}
})
@@ -81,7 +80,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
framework.ExpectNoError(err)
for _, e := range events.Items {
-e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
}
if ps != nil {
@@ -98,20 +97,20 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace string, nodes []v1.Node, podNames []string) {
err := RecreateNodes(c, nodes)
if err != nil {
-e2elog.Failf("Test failed; failed to start the restart instance group command.")
+framework.Failf("Test failed; failed to start the restart instance group command.")
}
err = WaitForNodeBootIdsToChange(c, nodes, framework.RecreateNodeReadyAgainTimeout)
if err != nil {
-e2elog.Failf("Test failed; failed to recreate at least one node in %v.", framework.RecreateNodeReadyAgainTimeout)
+framework.Failf("Test failed; failed to recreate at least one node in %v.", framework.RecreateNodeReadyAgainTimeout)
}
nodesAfter, err := e2enode.CheckReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout)
framework.ExpectNoError(err)
-e2elog.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter))
+framework.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter))
if len(nodes) != len(nodesAfter) {
-e2elog.Failf("Had %d nodes before nodes were recreated, but now only have %d",
+framework.Failf("Had %d nodes before nodes were recreated, but now only have %d",
len(nodes), len(nodesAfter))
}
@@ -121,6 +120,6 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
framework.ExpectNoError(err)
remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) {
-e2elog.Failf("At least one pod wasn't running and ready after the restart.")
+framework.Failf("At least one pod wasn't running and ready after the restart.")
}
}

View File

@@ -26,7 +26,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
// RecreateNodes recreates the given nodes in a managed instance group.
@@ -62,7 +61,7 @@ func RecreateNodes(c clientset.Interface, nodes []v1.Node) error {
args = append(args, fmt.Sprintf("--instances=%s", strings.Join(nodeNames, ",")))
args = append(args, fmt.Sprintf("--zone=%s", zone))
-e2elog.Logf("Recreating instance group %s.", instanceGroup)
+framework.Logf("Recreating instance group %s.", instanceGroup)
stdout, stderr, err := framework.RunCmd("gcloud", args...)
if err != nil {
return fmt.Errorf("error recreating nodes: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
@@ -79,7 +78,7 @@ func WaitForNodeBootIdsToChange(c clientset.Interface, nodes []v1.Node, timeout
if err := wait.Poll(30*time.Second, timeout, func() (bool, error) {
newNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
-e2elog.Logf("Could not get node info: %s. Retrying in %v.", err, 30*time.Second)
+framework.Logf("Could not get node info: %s. Retrying in %v.", err, 30*time.Second)
return false, nil
}
return node.Status.NodeInfo.BootID != newNode.Status.NodeInfo.BootID, nil