use log functions of core framework
commit 888c8476a9
parent 12e6930d8a
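
The change is mechanical: call sites that used the dedicated `e2elog` package (`e2elog.Logf`, `e2elog.Failf`) now call the equivalent helpers on the core e2e framework (`framework.Logf`, `framework.Failf`), and the `//test/e2e/framework/log` dependency is dropped from the affected BUILD files. As a standalone sketch of what such a helper provides (the real `framework.Logf` writes a timestamped line to ginkgo's output stream; `os.Stderr` stands in here):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// logf is a standalone sketch of a framework-style Logf helper: a
// timestamped, printf-formatted line on the test output stream.
func logf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, "%s: INFO: %s\n",
		time.Now().Format("Jan  2 15:04:05.000"), fmt.Sprintf(format, args...))
}

func main() {
	logf("Kibana is unreachable: %v", fmt.Errorf("connection refused"))
}
```
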
@@ -16,7 +16,6 @@ go_library(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/config:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/instrumentation/common:go_default_library",
         "//test/e2e/instrumentation/logging/elasticsearch:go_default_library",
         "//test/e2e/instrumentation/logging/stackdriver:go_default_library",
@@ -20,7 +20,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/e2e/instrumentation/common:go_default_library",
@@ -23,7 +23,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
@@ -64,7 +63,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 	// being run as the first e2e test just after the e2e cluster has been created.
 	err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
 		if _, err := s.Get("kibana-logging", metav1.GetOptions{}); err != nil {
-			e2elog.Logf("Kibana is unreachable: %v", err)
+			framework.Logf("Kibana is unreachable: %v", err)
 			return false, nil
 		}
 		return true, nil
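
The pattern in this hunk is worth noting: the condition function logs the failure and returns `false, nil`, so the poll retries until the timeout instead of aborting on the first transient error. A self-contained sketch with a hand-rolled `poll` that mirrors the `wait.Poll` semantics used here (the `getService` stub is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// poll runs cond every interval until it reports done, returns an error,
// or the timeout elapses — the same contract as wait.Poll above.
func poll(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil || done {
			return err
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	// getService stands in for s.Get("kibana-logging", ...).
	getService := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("service not found")
		}
		return nil
	}
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		if err := getService(); err != nil {
			fmt.Printf("Kibana is unreachable: %v\n", err)
			return false, nil // retry rather than abort
		}
		return true, nil
	})
	fmt.Println("poll result:", err)
}
```
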
@@ -86,7 +85,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 	err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
 		req, err := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if err != nil {
-			e2elog.Logf("Failed to get services proxy request: %v", err)
+			framework.Logf("Failed to get services proxy request: %v", err)
 			return false, nil
 		}
 
@@ -98,7 +97,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 			Name("kibana-logging").
 			DoRaw()
 		if err != nil {
-			e2elog.Logf("Proxy call to kibana-logging failed: %v", err)
+			framework.Logf("Proxy call to kibana-logging failed: %v", err)
 			return false, nil
 		}
 		return true, nil
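
For context, `GetServicesProxyRequest` issues requests through the apiserver's service proxy subresource rather than connecting to the service directly. A sketch of the path shape it ultimately targets (host and names here are illustrative):

```go
package main

import "fmt"

// serviceProxyPath sketches the apiserver path behind the proxy request
// above: the services/proxy subresource of a namespaced service.
func serviceProxyPath(namespace, service, subpath string) string {
	return fmt.Sprintf("/api/v1/namespaces/%s/services/%s/proxy/%s",
		namespace, service, subpath)
}

func main() {
	// e.g. GET <apiserver>/api/v1/namespaces/kube-system/services/kibana-logging/proxy/
	fmt.Println(serviceProxyPath("kube-system", "kibana-logging", ""))
}
```
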
@@ -26,7 +26,6 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
@@ -57,7 +56,7 @@ func newEsLogProvider(f *framework.Framework) (*esLogProvider, error) {
 func (p *esLogProvider) Init() error {
 	f := p.Framework
 	// Check for the existence of the Elasticsearch service.
-	e2elog.Logf("Checking the Elasticsearch service exists.")
+	framework.Logf("Checking the Elasticsearch service exists.")
 	s := f.ClientSet.CoreV1().Services(api.NamespaceSystem)
 	// Make a few attempts to connect. This makes the test robust against
 	// being run as the first e2e test just after the e2e cluster has been created.
@@ -66,14 +65,14 @@ func (p *esLogProvider) Init() error {
 		if _, err = s.Get("elasticsearch-logging", meta_v1.GetOptions{}); err == nil {
 			break
 		}
-		e2elog.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
+		framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
 	}
 	if err != nil {
 		return err
 	}
 
 	// Wait for the Elasticsearch pods to enter the running state.
-	e2elog.Logf("Checking to make sure the Elasticsearch pods are running")
+	framework.Logf("Checking to make sure the Elasticsearch pods are running")
 	labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String()
 	options := meta_v1.ListOptions{LabelSelector: labelSelector}
 	pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)
@@ -87,7 +86,7 @@ func (p *esLogProvider) Init() error {
 		}
 	}
 
-	e2elog.Logf("Checking to make sure we are talking to an Elasticsearch service.")
+	framework.Logf("Checking to make sure we are talking to an Elasticsearch service.")
 	// Perform a few checks to make sure this looks like an Elasticsearch cluster.
 	var statusCode int
 	err = nil
@@ -95,7 +94,7 @@ func (p *esLogProvider) Init() error {
 	for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
 		proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if errProxy != nil {
-			e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
+			framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
 			continue
 		}
 		// Query against the root URL for Elasticsearch.
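
This hunk shows the retry idiom used throughout the Elasticsearch checks: a loop bounded by elapsed time that sleeps between attempts and logs how long it has been trying. A runnable sketch of the same shape, with a hypothetical `check` standing in for the proxied call:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	const (
		retryTimeout = 200 * time.Millisecond // stands in for esRetryTimeout
		retryDelay   = 50 * time.Millisecond  // stands in for esRetryDelay
	)
	attempts := 0
	// check stands in for one proxied call to Elasticsearch.
	check := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("connection refused")
		}
		return nil
	}
	var err error
	// Same loop shape as the test: bounded by elapsed time, sleeping
	// between attempts, logging how long we have been trying.
	for start := time.Now(); time.Since(start) < retryTimeout; time.Sleep(retryDelay) {
		if err = check(); err != nil {
			fmt.Printf("After %v check failed: %v\n", time.Since(start), err)
			continue
		}
		break
	}
	fmt.Println("final error:", err)
}
```
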
@@ -106,11 +105,11 @@ func (p *esLogProvider) Init() error {
 		response.StatusCode(&statusCode)
 
 		if err != nil {
-			e2elog.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
+			framework.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
 			continue
 		}
 		if int(statusCode) != 200 {
-			e2elog.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
+			framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
 			continue
 		}
 		break
@@ -119,17 +118,17 @@ func (p *esLogProvider) Init() error {
 		return err
 	}
 	if int(statusCode) != 200 {
-		e2elog.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
+		framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
 	}
 
 	// Now assume we really are talking to an Elasticsearch instance.
 	// Check the cluster health.
-	e2elog.Logf("Checking health of Elasticsearch service.")
+	framework.Logf("Checking health of Elasticsearch service.")
 	healthy := false
 	for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
 		proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if errProxy != nil {
-			e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
+			framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
 			continue
 		}
 		body, err = proxyRequest.Namespace(api.NamespaceSystem).
@@ -143,17 +142,17 @@ func (p *esLogProvider) Init() error {
 		health := make(map[string]interface{})
 		err := json.Unmarshal(body, &health)
 		if err != nil {
-			e2elog.Logf("Bad json response from elasticsearch: %v", err)
+			framework.Logf("Bad json response from elasticsearch: %v", err)
 			continue
 		}
 		statusIntf, ok := health["status"]
 		if !ok {
-			e2elog.Logf("No status field found in cluster health response: %v", health)
+			framework.Logf("No status field found in cluster health response: %v", health)
 			continue
 		}
 		status := statusIntf.(string)
 		if status != "green" && status != "yellow" {
-			e2elog.Logf("Cluster health has bad status: %v", health)
+			framework.Logf("Cluster health has bad status: %v", health)
 			continue
 		}
 		if err == nil && ok {
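
The health check above unmarshals the Elasticsearch `/_cluster/health` payload into a generic map and accepts either green or yellow. A self-contained sketch with a trimmed-down example payload:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A trimmed-down example of an Elasticsearch /_cluster/health payload.
	body := []byte(`{"cluster_name":"kubernetes-logging","status":"yellow"}`)

	health := make(map[string]interface{})
	if err := json.Unmarshal(body, &health); err != nil {
		fmt.Printf("Bad json response from elasticsearch: %v\n", err)
		return
	}
	statusIntf, ok := health["status"]
	if !ok {
		fmt.Printf("No status field found in cluster health response: %v\n", health)
		return
	}
	status, _ := statusIntf.(string)
	// green means fully allocated, yellow means primaries allocated;
	// both are usable for the test, red is not.
	if status != "green" && status != "yellow" {
		fmt.Printf("Cluster health has bad status: %v\n", health)
		return
	}
	fmt.Println("cluster is healthy enough:", status)
}
```
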
@@ -177,12 +176,12 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
 
 	proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 	if errProxy != nil {
-		e2elog.Logf("Failed to get services proxy request: %v", errProxy)
+		framework.Logf("Failed to get services proxy request: %v", errProxy)
 		return nil
 	}
 
 	query := fmt.Sprintf("kubernetes.pod_name:%s AND kubernetes.namespace_name:%s", name, f.Namespace.Name)
-	e2elog.Logf("Sending a search request to Elasticsearch with the following query: %s", query)
+	framework.Logf("Sending a search request to Elasticsearch with the following query: %s", query)
 
 	// Ask Elasticsearch to return all the log lines that were tagged with the
 	// pod name. Ask for ten times as many log lines because duplication is possible.
@@ -194,26 +193,26 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
 		Param("size", strconv.Itoa(searchPageSize)).
 		DoRaw()
 	if err != nil {
-		e2elog.Logf("Failed to make proxy call to elasticsearch-logging: %v", err)
+		framework.Logf("Failed to make proxy call to elasticsearch-logging: %v", err)
 		return nil
 	}
 
 	var response map[string]interface{}
 	err = json.Unmarshal(body, &response)
 	if err != nil {
-		e2elog.Logf("Failed to unmarshal response: %v", err)
+		framework.Logf("Failed to unmarshal response: %v", err)
 		return nil
 	}
 
 	hits, ok := response["hits"].(map[string]interface{})
 	if !ok {
-		e2elog.Logf("response[hits] not of the expected type: %T", response["hits"])
+		framework.Logf("response[hits] not of the expected type: %T", response["hits"])
 		return nil
 	}
 
 	h, ok := hits["hits"].([]interface{})
 	if !ok {
-		e2elog.Logf("Hits not of the expected type: %T", hits["hits"])
+		framework.Logf("Hits not of the expected type: %T", hits["hits"])
 		return nil
 	}
 
@@ -222,13 +221,13 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
 	for _, e := range h {
 		l, ok := e.(map[string]interface{})
 		if !ok {
-			e2elog.Logf("Element of hit not of expected type: %T", e)
+			framework.Logf("Element of hit not of expected type: %T", e)
 			continue
 		}
 
 		source, ok := l["_source"].(map[string]interface{})
 		if !ok {
-			e2elog.Logf("_source not of the expected type: %T", l["_source"])
+			framework.Logf("_source not of the expected type: %T", l["_source"])
 			continue
 		}
 
@@ -244,7 +243,7 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
 			continue
 		}
 
-		e2elog.Logf("Log is of unknown type, got %v, want string or object in field 'log'", source)
+		framework.Logf("Log is of unknown type, got %v, want string or object in field 'log'", source)
 	}
 
 	return entries
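
`ReadEntries` walks the untyped search response (`hits.hits[]._source`) with type assertions, skipping malformed entries rather than failing the whole read. A runnable sketch of the same traversal over a sample response:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A trimmed-down Elasticsearch search response: hits.hits[]._source.
	body := []byte(`{"hits":{"hits":[
		{"_source":{"log":"line 1\n"}},
		{"_source":{"log":"line 2\n"}}
	]}}`)

	var response map[string]interface{}
	if err := json.Unmarshal(body, &response); err != nil {
		fmt.Printf("Failed to unmarshal response: %v\n", err)
		return
	}
	hits, ok := response["hits"].(map[string]interface{})
	if !ok {
		fmt.Printf("response[hits] not of the expected type: %T\n", response["hits"])
		return
	}
	h, ok := hits["hits"].([]interface{})
	if !ok {
		fmt.Printf("Hits not of the expected type: %T\n", hits["hits"])
		return
	}
	for _, e := range h {
		l, ok := e.(map[string]interface{})
		if !ok {
			continue // skip malformed entries instead of failing
		}
		source, ok := l["_source"].(map[string]interface{})
		if !ok {
			continue
		}
		if log, ok := source["log"].(string); ok {
			fmt.Print(log)
		}
	}
}
```
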
@@ -27,7 +27,6 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/config"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -61,14 +60,14 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
 			defer wg.Done()
 			defer ginkgo.GinkgoRecover()
 			wave := fmt.Sprintf("wave%v", strconv.Itoa(i))
-			e2elog.Logf("Starting logging soak, wave = %v", wave)
+			framework.Logf("Starting logging soak, wave = %v", wave)
 			RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime)
-			e2elog.Logf("Completed logging soak, wave %v", i)
+			framework.Logf("Completed logging soak, wave %v", i)
 		}()
 		// Niceness.
 		time.Sleep(loggingSoak.TimeBetweenWaves)
 	}
-	e2elog.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale)
+	framework.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale)
 	wg.Wait()
 	})
 })
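
The soak test launches each wave in its own goroutine, tracks them with a `sync.WaitGroup`, and staggers the starts. A minimal sketch of that structure (the constants and the sleep stand in for `loggingSoak` settings and `RunLogPodsWithSleepOf`):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const (
		scale            = 3                     // stands in for loggingSoak.Scale
		timeBetweenWaves = 20 * time.Millisecond // stands in for loggingSoak.TimeBetweenWaves
	)
	var wg sync.WaitGroup
	wg.Add(scale)
	for i := 1; i <= scale; i++ {
		go func(i int) {
			defer wg.Done()
			wave := fmt.Sprintf("wave%v", i)
			fmt.Printf("Starting logging soak, wave = %v\n", wave)
			time.Sleep(50 * time.Millisecond) // stands in for RunLogPodsWithSleepOf
			fmt.Printf("Completed logging soak, wave %v\n", i)
		}(i)
		// Niceness: stagger the waves.
		time.Sleep(timeBetweenWaves)
	}
	fmt.Printf("Waiting on all %v logging soak waves to complete\n", scale)
	wg.Wait()
}
```
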
@@ -122,8 +121,8 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
 	pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)
 
 	if err != nil {
-		e2elog.Failf("Error in wait... %v", err)
+		framework.Failf("Error in wait... %v", err)
 	} else if len(pods) < totalPods {
-		e2elog.Failf("Only got %v out of %v", len(pods), totalPods)
+		framework.Failf("Only got %v out of %v", len(pods), totalPods)
 	}
 }
@@ -18,7 +18,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/instrumentation/common:go_default_library",
         "//test/e2e/instrumentation/logging/utils:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -22,7 +22,6 @@ import (
 
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 	"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
 
@@ -156,7 +155,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
 		podName := fmt.Sprintf("synthlogger-%s", string(uuid.NewUUID()))
 		err := utils.NewLoadLoggingPod(podName, "", 1, 1*time.Second).Start(f)
 		if err != nil {
-			e2elog.Logf("Failed to create a logging pod: %v", err)
+			framework.Logf("Failed to create a logging pod: %v", err)
 		}
 		return false, nil
 	}, stopCh)
@@ -22,7 +22,6 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 	"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
 
@@ -86,7 +85,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
 		// Starting one pod on each node.
 		for _, pod := range podsByRun[runIdx] {
 			if err := pod.Start(f); err != nil {
-				e2elog.Logf("Failed to start pod: %v", err)
+				framework.Logf("Failed to start pod: %v", err)
 			}
 		}
 		<-t.C
@@ -26,7 +26,6 @@ import (
 
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
 
 	"golang.org/x/oauth2/google"
@@ -118,7 +117,7 @@ func ensureProjectHasSinkCapacity(sinksService *sd.ProjectsSinksService, project
 		return err
 	}
 	if len(listResponse.Sinks) >= stackdriverSinkCountLimit {
-		e2elog.Logf("Reached Stackdriver sink limit. Deleting all sinks")
+		framework.Logf("Reached Stackdriver sink limit. Deleting all sinks")
 		deleteSinks(sinksService, projectID, listResponse.Sinks)
 	}
 	return nil
@@ -137,7 +136,7 @@ func deleteSinks(sinksService *sd.ProjectsSinksService, projectID string, sinks
 	for _, sink := range sinks {
 		sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, sink.Name)
 		if _, err := sinksService.Delete(sinkNameID).Do(); err != nil {
-			e2elog.Logf("Failed to delete LogSink: %v", err)
+			framework.Logf("Failed to delete LogSink: %v", err)
 		}
 	}
 }
@@ -186,21 +185,21 @@ func (p *sdLogProvider) Cleanup() {
 		sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, p.logSink.Name)
 		sinksService := p.sdService.Projects.Sinks
 		if _, err := sinksService.Delete(sinkNameID).Do(); err != nil {
-			e2elog.Logf("Failed to delete LogSink: %v", err)
+			framework.Logf("Failed to delete LogSink: %v", err)
 		}
 	}
 
 	if p.subscription != nil {
 		subsService := p.pubsubService.Projects.Subscriptions
 		if _, err := subsService.Delete(p.subscription.Name).Do(); err != nil {
-			e2elog.Logf("Failed to delete PubSub subscription: %v", err)
+			framework.Logf("Failed to delete PubSub subscription: %v", err)
 		}
 	}
 
 	if p.topic != nil {
 		topicsService := p.pubsubService.Projects.Topics
 		if _, err := topicsService.Delete(p.topic.Name).Do(); err != nil {
-			e2elog.Logf("Failed to delete PubSub topic: %v", err)
+			framework.Logf("Failed to delete PubSub topic: %v", err)
 		}
 	}
 }
@@ -235,7 +234,7 @@ func (p *sdLogProvider) createSink(projectID, sinkName, topicName string) (*sd.L
 	if err != nil {
 		return nil, err
 	}
-	e2elog.Logf("Using the following filter for log entries: %s", filter)
+	framework.Logf("Using the following filter for log entries: %s", filter)
 	sink := &sd.LogSink{
 		Name: sinkName,
 		Destination: fmt.Sprintf("pubsub.googleapis.com/%s", topicName),
@@ -281,20 +280,20 @@ func (p *sdLogProvider) authorizeSink() error {
 }
 
 func (p *sdLogProvider) waitSinkInit() error {
-	e2elog.Logf("Waiting for log sink to become operational")
+	framework.Logf("Waiting for log sink to become operational")
 	return wait.Poll(1*time.Second, sinkStartupTimeout, func() (bool, error) {
 		err := publish(p.pubsubService, p.topic, "embrace eternity")
 		if err != nil {
-			e2elog.Logf("Failed to push message to PubSub due to %v", err)
+			framework.Logf("Failed to push message to PubSub due to %v", err)
 		}
 
 		messages, err := pullAndAck(p.pubsubService, p.subscription)
 		if err != nil {
-			e2elog.Logf("Failed to pull messages from PubSub due to %v", err)
+			framework.Logf("Failed to pull messages from PubSub due to %v", err)
 			return false, nil
 		}
 		if len(messages) > 0 {
-			e2elog.Logf("Sink %s is operational", p.logSink.Name)
+			framework.Logf("Sink %s is operational", p.logSink.Name)
 			return true, nil
 		}
 
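
`waitSinkInit` proves the sink is wired end to end by publishing a canary message and polling until it comes back through the subscription. A self-contained sketch of the same round trip, with a buffered channel standing in for the PubSub topic and subscription:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	// A buffered channel stands in for the PubSub topic/subscription pair.
	pipe := make(chan string, 10)
	publish := func(msg string) error {
		select {
		case pipe <- msg:
			return nil
		default:
			return errors.New("topic full")
		}
	}
	pull := func() []string {
		var msgs []string
		for {
			select {
			case m := <-pipe:
				msgs = append(msgs, m)
			default:
				return msgs
			}
		}
	}

	// Same shape as waitSinkInit: keep publishing a canary and polling
	// until at least one message makes the round trip.
	deadline := time.Now().Add(time.Second)
	for time.Now().Before(deadline) {
		if err := publish("embrace eternity"); err != nil {
			fmt.Printf("Failed to push message due to %v\n", err)
		}
		if messages := pull(); len(messages) > 0 {
			fmt.Println("Sink is operational")
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Println("sink never became operational")
}
```
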
@@ -319,32 +318,32 @@ func (p *sdLogProvider) startPollingLogs() {
 func (p *sdLogProvider) pollLogsOnce() {
 	messages, err := pullAndAck(p.pubsubService, p.subscription)
 	if err != nil {
-		e2elog.Logf("Failed to pull messages from PubSub due to %v", err)
+		framework.Logf("Failed to pull messages from PubSub due to %v", err)
 		return
 	}
 
 	for _, msg := range messages {
 		logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data)
 		if err != nil {
-			e2elog.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
+			framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
 			continue
 		}
 
 		var sdLogEntry sd.LogEntry
 		if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil {
-			e2elog.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
+			framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
 			continue
 		}
 
 		name, ok := p.tryGetName(sdLogEntry)
 		if !ok {
-			e2elog.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
+			framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
 			continue
 		}
 
 		logEntry, err := convertLogEntry(sdLogEntry)
 		if err != nil {
-			e2elog.Logf("Failed to parse Stackdriver LogEntry: %v", err)
+			framework.Logf("Failed to parse Stackdriver LogEntry: %v", err)
 			continue
 		}
 
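
Each pulled message carries a base64-encoded JSON log entry; the poller decodes it, unmarshals it, and skips anything malformed. A runnable sketch using a pared-down stand-in for the Stackdriver `sd.LogEntry` type (only the fields this path reads):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// logEntry is a pared-down stand-in for sd.LogEntry, keeping only the
// fields this decoding path cares about.
type logEntry struct {
	TextPayload string `json:"textPayload"`
	Resource    struct {
		Type string `json:"type"`
	} `json:"resource"`
}

func main() {
	// PubSub delivers message data base64-encoded; this is what a sink
	// export of one container log line could look like.
	raw := `{"textPayload":"line 1\n","resource":{"type":"container"}}`
	data := base64.StdEncoding.EncodeToString([]byte(raw))

	decoded, err := base64.StdEncoding.DecodeString(data)
	if err != nil {
		fmt.Printf("Got a message that is not base64-encoded: %s\n", data)
		return
	}
	var entry logEntry
	if err := json.Unmarshal(decoded, &entry); err != nil {
		fmt.Printf("Failed to decode a pubsub message '%s': %v\n", decoded, err)
		return
	}
	fmt.Printf("resource type %q, payload %q\n", entry.Resource.Type, entry.TextPayload)
}
```
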
@@ -408,7 +407,7 @@ func pullAndAck(service *pubsub.Service, subs *pubsub.Subscription) ([]*pubsub.R
 	if len(ids) > 0 {
 		ackReq := &pubsub.AcknowledgeRequest{AckIds: ids}
 		if _, err = subsService.Acknowledge(subs.Name, ackReq).Do(); err != nil {
-			e2elog.Logf("Failed to ack poll: %v", err)
+			framework.Logf("Failed to ack poll: %v", err)
 		}
 	}
 
@@ -25,7 +25,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/k8s.io/utils/integer:go_default_library",
@@ -24,7 +24,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/utils/integer"
 )
 
@@ -68,13 +67,13 @@ func EnsureLoggingAgentRestartsCount(f *framework.Framework, appName string, max
 	for _, pod := range agentPods.Items {
 		contStatuses := pod.Status.ContainerStatuses
 		if len(contStatuses) == 0 {
-			e2elog.Logf("There are no container statuses for pod %s", pod.Name)
+			framework.Logf("There are no container statuses for pod %s", pod.Name)
 			continue
 		}
 		restartCount := int(contStatuses[0].RestartCount)
 		maxRestartCount = integer.IntMax(maxRestartCount, restartCount)
 
-		e2elog.Logf("Logging agent %s on node %s was restarted %d times",
+		framework.Logf("Logging agent %s on node %s was restarted %d times",
 			pod.Name, pod.Spec.NodeName, restartCount)
 	}
 
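
The agent check reduces the per-pod restart counts to a single maximum via `integer.IntMax`. A minimal sketch of that reduction (`intMax` mirrors the helper from `k8s.io/utils/integer`):

```go
package main

import "fmt"

// intMax mirrors integer.IntMax from k8s.io/utils/integer.
func intMax(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	// Restart counts of the first container in each logging-agent pod.
	restartCounts := []int{0, 2, 1}
	maxRestartCount := 0
	for _, rc := range restartCounts {
		maxRestartCount = intMax(maxRestartCount, rc)
	}
	fmt.Printf("max logging agent restarts: %d\n", maxRestartCount)
	// The caller then compares maxRestartCount against the allowed budget.
}
```
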
@@ -26,7 +26,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -95,7 +94,7 @@ func (p *loadLoggingPod) Name() string {
 }
 
 func (p *loadLoggingPod) Start(f *framework.Framework) error {
-	e2elog.Logf("Starting load logging pod %s", p.name)
+	framework.Logf("Starting load logging pod %s", p.name)
 	f.PodClient().Create(&v1.Pod{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name: p.name,
@@ -161,7 +160,7 @@ func (p *execLoggingPod) Name() string {
 }
 
 func (p *execLoggingPod) Start(f *framework.Framework) error {
-	e2elog.Logf("Starting repeating logging pod %s", p.name)
+	framework.Logf("Starting repeating logging pod %s", p.name)
 	f.PodClient().Create(&v1.Pod{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name: p.name,
@@ -22,7 +22,7 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/util/wait"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 )
 
 // LogChecker is an interface for an entity that can check whether logging
@@ -195,14 +195,14 @@ func getFullIngestionTimeout(podsMap map[string]FiniteLoggingPod, slack float64)
 		totalWant += want
 	}
 	if len(lossMsgs) > 0 {
-		e2elog.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n"))
+		framework.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n"))
 	}
 	lostFrac := 1 - float64(totalGot)/float64(totalWant)
 	if lostFrac > slack {
 		return fmt.Errorf("still missing %.2f%% of logs, only %.2f%% is tolerable",
 			lostFrac*100, slack*100)
 	}
-	e2elog.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%",
+	framework.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%",
 		lostFrac*100, slack*100)
 	return nil
 }
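
The final check in this file judges ingestion by the fraction of missing log lines against an allowed slack. A runnable sketch of the same arithmetic:

```go
package main

import "fmt"

// checkLoss mirrors the tolerance check above: fail only when the fraction
// of missing log lines exceeds the allowed slack.
func checkLoss(totalGot, totalWant int, slack float64) error {
	lostFrac := 1 - float64(totalGot)/float64(totalWant)
	if lostFrac > slack {
		return fmt.Errorf("still missing %.2f%% of logs, only %.2f%% is tolerable",
			lostFrac*100, slack*100)
	}
	fmt.Printf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%\n",
		lostFrac*100, slack*100)
	return nil
}

func main() {
	fmt.Println(checkLoss(990, 1000, 0.05)) // 1% lost, 5% tolerable -> nil
	fmt.Println(checkLoss(900, 1000, 0.05)) // 10% lost -> error
}
```
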