Fix variable names that collide with imported package names

parent 4d337d2d32
commit 4b8080dda8
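
All of the renames below (labels -> podLabels, path -> traceroute, url -> hostURL, json -> dataJSON, runtime -> containerRuntime, config -> configCmd, and so on) fix the same Go issue: a local variable that reuses the name of an imported package shadows that package for the rest of its scope, so any later package-qualified call in that scope stops compiling. A minimal sketch of the problem, using a hypothetical hostOf helper rather than code from this commit:

package main

import (
	"fmt"
	"net/url"
)

// hostOf is a hypothetical helper, not taken from the commit.
func hostOf(raw string) (string, error) {
	// If this variable were named "url", it would shadow the net/url package
	// for the rest of the scope, and a later package call such as
	// url.PathEscape below would fail to compile with
	// "url.PathEscape undefined (type *url.URL has no field or method PathEscape)".
	hostURL, err := url.Parse(raw)
	if err != nil {
		return "", err
	}
	// Still fine: the package identifier is untouched.
	return url.PathEscape(hostURL.Host), nil
}

func main() {
	host, err := hostOf("https://example.com:6443/metrics")
	if err != nil {
		panic(err)
	}
	fmt.Println(host)
}
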
@@ -569,7 +569,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 // CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
 func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
 	nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
-	labels := map[string]string{
+	podLabels := map[string]string{
 		"app": appName + "-pod",
 	}
 	for i, node := range nodes.Items {
@@ -579,14 +579,14 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
 			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:   fmt.Sprintf(appName+"-pod-%v", i),
-					Labels: labels,
+					Labels: podLabels,
 				},
 				Spec: podSpec(node),
 			})
 			ExpectNoError(err)
 		}
 	}
-	return labels
+	return podLabels
 }
 
 // KubeUser is a struct for managing kubernetes user info.
@@ -43,12 +43,12 @@ func RealVersion(s string) (string, error) {
 }
 
 func traceRouteToMaster() {
-	path, err := exec.LookPath("traceroute")
+	traceroute, err := exec.LookPath("traceroute")
 	if err != nil {
 		e2elog.Logf("Could not find traceroute program")
 		return
 	}
-	cmd := exec.Command(path, "-I", framework.GetMasterHost())
+	cmd := exec.Command(traceroute, "-I", framework.GetMasterHost())
 	out, err := cmd.Output()
 	if len(out) != 0 {
 		e2elog.Logf(string(out))
@@ -170,8 +170,8 @@ func (p *Provider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) erro
 	}
 
 	return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
-		e2eservice := p.gceCloud.ComputeServices().GA
-		list, err := e2eservice.ForwardingRules.List(project, region).Do()
+		computeservice := p.gceCloud.ComputeServices().GA
+		list, err := computeservice.ForwardingRules.List(project, region).Do()
 		if err != nil {
 			return false, err
 		}
@@ -374,7 +374,7 @@ func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
 	userNick := "user"
 	contextNick := "context"
 
-	config := clientcmdapi.NewConfig()
+	configCmd := clientcmdapi.NewConfig()
 
 	credentials := clientcmdapi.NewAuthInfo()
 	credentials.Token = clientCfg.BearerToken
@@ -387,7 +387,7 @@ func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
 	if len(credentials.ClientKey) == 0 {
 		credentials.ClientKeyData = clientCfg.TLSClientConfig.KeyData
 	}
-	config.AuthInfos[userNick] = credentials
+	configCmd.AuthInfos[userNick] = credentials
 
 	cluster := clientcmdapi.NewCluster()
 	cluster.Server = clientCfg.Host
@@ -396,15 +396,15 @@ func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
 		cluster.CertificateAuthorityData = clientCfg.CAData
 	}
 	cluster.InsecureSkipTLSVerify = clientCfg.Insecure
-	config.Clusters[clusterNick] = cluster
+	configCmd.Clusters[clusterNick] = cluster
 
 	context := clientcmdapi.NewContext()
 	context.Cluster = clusterNick
 	context.AuthInfo = userNick
-	config.Contexts[contextNick] = context
-	config.CurrentContext = contextNick
+	configCmd.Contexts[contextNick] = context
+	configCmd.CurrentContext = contextNick
 
-	return config
+	return configCmd
 }
 
 // AfterReadingAllFlags makes changes to the context after all flags
@@ -354,17 +354,17 @@ func SkipUnlessTaintBasedEvictionsEnabled() {
 
 // SkipIfContainerRuntimeIs skips if the container runtime is included in the runtimes.
 func SkipIfContainerRuntimeIs(runtimes ...string) {
-	for _, runtime := range runtimes {
-		if runtime == TestContext.ContainerRuntime {
-			skipInternalf(1, "Not supported under container runtime %s", runtime)
+	for _, containerRuntime := range runtimes {
+		if containerRuntime == TestContext.ContainerRuntime {
+			skipInternalf(1, "Not supported under container runtime %s", containerRuntime)
 		}
 	}
 }
 
 // RunIfContainerRuntimeIs runs if the container runtime is included in the runtimes.
 func RunIfContainerRuntimeIs(runtimes ...string) {
-	for _, runtime := range runtimes {
-		if runtime == TestContext.ContainerRuntime {
+	for _, containerRuntime := range runtimes {
+		if containerRuntime == TestContext.ContainerRuntime {
 			return
 		}
 	}
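
The containerRuntime rename above is the loop-variable flavor of the same collision: the shadowing only lasts for the loop body, but inside that body the runtime package (whichever runtime package the file imports) becomes unreachable. A small illustrative sketch against the standard library's runtime package, not the code touched here:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	names := []string{"docker", "containerd", "cri-o"}

	// With "for _, runtime := range names", a call such as runtime.NumCPU()
	// inside the loop body would not compile, because "runtime" would then
	// name a string variable. Renaming the loop variable keeps both usable.
	for _, containerRuntime := range names {
		fmt.Printf("%s (host has %d CPUs)\n", containerRuntime, runtime.NumCPU())
	}
}
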
@@ -3054,14 +3054,14 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) {
 	internalIP = eps.Subsets[0].Addresses[0].IP
 
 	// Populate the external IP/hostname.
-	url, err := url.Parse(TestContext.Host)
+	hostURL, err := url.Parse(TestContext.Host)
 	if err != nil {
 		e2elog.Failf("Failed to parse hostname: %v", err)
 	}
-	if net.ParseIP(url.Host) != nil {
-		externalIP = url.Host
+	if net.ParseIP(hostURL.Host) != nil {
+		externalIP = hostURL.Host
 	} else {
-		hostname = url.Host
+		hostname = hostURL.Host
 	}
 
 	return externalIP, internalIP, hostname
@@ -3242,7 +3242,7 @@ func DumpDebugInfo(c clientset.Interface, ns string) {
 
 // DsFromManifest reads a .json/yaml file and returns the daemonset in it.
 func DsFromManifest(url string) (*appsv1.DaemonSet, error) {
-	var controller appsv1.DaemonSet
+	var ds appsv1.DaemonSet
 	e2elog.Logf("Parsing ds from %v", url)
 
 	var response *http.Response
@@ -3269,16 +3269,16 @@ func DsFromManifest(url string) (*appsv1.DaemonSet, error) {
 		return nil, fmt.Errorf("Failed to read html response body: %v", err)
 	}
 
-	json, err := utilyaml.ToJSON(data)
+	dataJSON, err := utilyaml.ToJSON(data)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to parse data to json: %v", err)
 	}
 
-	err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), json, &controller)
+	err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), dataJSON, &ds)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to decode DaemonSet spec: %v", err)
 	}
-	return &controller, nil
+	return &ds, nil
 }
 
 // waitForServerPreferredNamespacedResources waits until server preferred namespaced resources could be successfully discovered.
@@ -44,7 +44,7 @@ func TestViperConfig(t *testing.T) {
 		config.AddOptionsToSet(flags, &context, "")
 	})
 
-	config := `
+	viperConfig := `
 bool: false
 duration: 1s
 float64: -1.23456789
@@ -57,7 +57,7 @@ uint64: 9123456789012345678
 	tmpfile, err := ioutil.TempFile("", "viperconfig-*.yaml")
 	require.NoError(t, err, "temp file")
 	defer os.Remove(tmpfile.Name())
-	if _, err := tmpfile.Write([]byte(config)); err != nil {
+	if _, err := tmpfile.Write([]byte(viperConfig)); err != nil {
 		require.NoError(t, err, "write config")
 	}
 	require.NoError(t, tmpfile.Close(), "close temp file")
@@ -191,15 +191,15 @@ func computeContainerResourceUsage(name string, oldStats, newStats *cadvisorapiv
 func (r *ResourceCollector) GetLatest() (e2ekubelet.ResourceUsagePerContainer, error) {
 	r.lock.RLock()
 	defer r.lock.RUnlock()
-	kubeletstatsv1alpha1 := make(e2ekubelet.ResourceUsagePerContainer)
+	resourceUsage := make(e2ekubelet.ResourceUsagePerContainer)
 	for key, name := range systemContainers {
 		contStats, ok := r.buffers[name]
 		if !ok || len(contStats) == 0 {
 			return nil, fmt.Errorf("No resource usage data for %s container (%s)", key, name)
 		}
-		kubeletstatsv1alpha1[key] = contStats[len(contStats)-1]
+		resourceUsage[key] = contStats[len(contStats)-1]
 	}
-	return kubeletstatsv1alpha1, nil
+	return resourceUsage, nil
 }
 
 type resourceUsageByCPU []*e2ekubelet.ContainerResourceUsage
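
The e2eservice and kubeletstatsv1alpha1 renames look like the same problem applied to import aliases: those identifiers match alias names commonly used in the e2e framework (an assumption based on naming conventions, not on the files' import blocks shown here), and an alias is shadowed by a same-named variable exactly like a plain package name. A hedged sketch with a made-up alias:

package main

import (
	"fmt"
	mathrand "math/rand" // hypothetical alias standing in for names like e2eservice or kubeletstatsv1alpha1
)

func main() {
	// The alias is just an identifier that refers to the package.
	fmt.Println(mathrand.Intn(10))

	// Declaring a variable with the same name, e.g.
	//	mathrand := "cri-o"
	// would shadow the alias, and a later mathrand.Intn call in this scope
	// would no longer compile. Renaming the variable (as this commit does
	// with e2eservice -> computeservice and kubeletstatsv1alpha1 ->
	// resourceUsage) keeps the alias usable.
	n := mathrand.Intn(10)
	fmt.Println(n)
}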