Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-31 23:37:01 +00:00)
fix duplicated imports of k8s code (#77484)
* fix duplicated imports of api/core/v1
* fix duplicated imports of client-go/kubernetes
* fix duplicated imports of rest code
* change import name to more reasonable
This commit is contained in:
parent f7d92fb963
commit 5268f69405
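Every file in this commit gets the same mechanical fix: the file imported one package twice under two different names, the redundant import is dropped, and call sites are rewritten to the surviving name. A minimal sketch of the before/after shape, using a hypothetical standalone file (not part of this commit):

package main

import (
	"fmt"

	// Before the fix, a file like this would also carry a second,
	// duplicated import of the same package under another alias:
	//     kuberuntime "k8s.io/apimachinery/pkg/runtime"
	// The fix deletes the redundant alias...
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// ...and rewrites call sites such as kuberuntime.NewScheme()
	// to use the surviving import name instead.
	scheme := runtime.NewScheme()
	fmt.Printf("registered types: %d\n", len(scheme.AllKnownTypes()))
}

The reconstructed diff follows; the scraped view had dropped the file names and the +/- markers, which are restored below from the hunk headers and the rewritten call sites.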
@@ -25,7 +25,6 @@ import (
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
-kuberuntime "k8s.io/apimachinery/pkg/runtime"
 clientsetfake "k8s.io/client-go/kubernetes/fake"
 clientsetscheme "k8s.io/client-go/kubernetes/scheme"
 core "k8s.io/client-go/testing"
@@ -580,7 +579,7 @@ func TestDeploymentsHaveSystemClusterCriticalPriorityClassName(t *testing.T) {
 t.Run(testCase.name, func(t *testing.T) {
 deploymentBytes, _ := kubeadmutil.ParseTemplate(testCase.manifest, testCase.data)
 deployment := &apps.Deployment{}
-if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), deploymentBytes, deployment); err != nil {
+if err := runtime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), deploymentBytes, deployment); err != nil {
 t.Errorf("unexpected error: %v", err)
 }
 if deployment.Spec.Template.Spec.PriorityClassName != "system-cluster-critical" {

@@ -25,7 +25,6 @@ import (
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
-kuberuntime "k8s.io/apimachinery/pkg/runtime"
 clientsetfake "k8s.io/client-go/kubernetes/fake"
 clientsetscheme "k8s.io/client-go/kubernetes/scheme"
 core "k8s.io/client-go/testing"
@@ -278,7 +277,7 @@ func TestDaemonSetsHaveSystemNodeCriticalPriorityClassName(t *testing.T) {
 t.Run(testCase.name, func(t *testing.T) {
 daemonSetBytes, _ := kubeadmutil.ParseTemplate(testCase.manifest, testCase.data)
 daemonSet := &apps.DaemonSet{}
-if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), daemonSetBytes, daemonSet); err != nil {
+if err := runtime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), daemonSetBytes, daemonSet); err != nil {
 t.Errorf("unexpected error: %v", err)
 }
 if daemonSet.Spec.Template.Spec.PriorityClassName != "system-node-critical" {

@@ -27,7 +27,6 @@ import (
 apps "k8s.io/api/apps/v1"
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/util/version"
 versionutil "k8s.io/apimachinery/pkg/util/version"
 clientsetfake "k8s.io/client-go/kubernetes/fake"
 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
@@ -122,7 +121,7 @@ func (f fakeEtcdClient) RemoveMember(id uint64) ([]etcdutil.Member, error) {
 return []etcdutil.Member{}, nil
 }

-func getEtcdVersion(v *version.Version) string {
+func getEtcdVersion(v *versionutil.Version) string {
 return constants.SupportedEtcdVersion[uint8(v.Minor())]
 }

@@ -30,7 +30,6 @@ import (
 netutil "k8s.io/apimachinery/pkg/util/net"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
-"k8s.io/kubernetes/cmd/kubeadm/app/constants"
 kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
@@ -77,7 +76,7 @@ func NewKubeWaiter(client clientset.Interface, timeout time.Duration, writer io.
 // WaitForAPI waits for the API Server's /healthz endpoint to report "ok"
 func (w *KubeWaiter) WaitForAPI() error {
 start := time.Now()
-return wait.PollImmediate(constants.APICallRetryInterval, w.timeout, func() (bool, error) {
+return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) {
 healthStatus := 0
 w.client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
 if healthStatus != http.StatusOK {
@@ -94,7 +93,7 @@ func (w *KubeWaiter) WaitForAPI() error {
 func (w *KubeWaiter) WaitForPodsWithLabel(kvLabel string) error {

 lastKnownPodNumber := -1
-return wait.PollImmediate(constants.APICallRetryInterval, w.timeout, func() (bool, error) {
+return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) {
 listOpts := metav1.ListOptions{LabelSelector: kvLabel}
 pods, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
 if err != nil {
@@ -123,7 +122,7 @@ func (w *KubeWaiter) WaitForPodsWithLabel(kvLabel string) error {

 // WaitForPodToDisappear blocks until it timeouts or gets a "NotFound" response from the API Server when getting the Static Pod in question
 func (w *KubeWaiter) WaitForPodToDisappear(podName string) error {
-return wait.PollImmediate(constants.APICallRetryInterval, w.timeout, func() (bool, error) {
+return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) {
 _, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).Get(podName, metav1.GetOptions{})
 if apierrors.IsNotFound(err) {
 fmt.Printf("[apiclient] The old Pod %q is now removed (which is desired)\n", podName)
@@ -187,8 +186,8 @@ func (w *KubeWaiter) WaitForStaticPodControlPlaneHashes(nodeName string) (map[st
 componentHash := ""
 var err error
 mirrorPodHashes := map[string]string{}
-for _, component := range constants.ControlPlaneComponents {
-err = wait.PollImmediate(constants.APICallRetryInterval, w.timeout, func() (bool, error) {
+for _, component := range kubeadmconstants.ControlPlaneComponents {
+err = wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) {
 componentHash, err = getStaticPodSingleHash(w.client, nodeName, component)
 if err != nil {
 return false, nil
@@ -209,7 +208,7 @@ func (w *KubeWaiter) WaitForStaticPodSingleHash(nodeName string, component strin

 componentPodHash := ""
 var err error
-err = wait.PollImmediate(constants.APICallRetryInterval, w.timeout, func() (bool, error) {
+err = wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) {
 componentPodHash, err = getStaticPodSingleHash(w.client, nodeName, component)
 if err != nil {
 return false, nil
@@ -223,7 +222,7 @@ func (w *KubeWaiter) WaitForStaticPodSingleHash(nodeName string, component strin
 // WaitForStaticPodHashChange blocks until it timeouts or notices that the Mirror Pod (for the Static Pod, respectively) has changed
 // This implicitly means this function blocks until the kubelet has restarted the Static Pod in question
 func (w *KubeWaiter) WaitForStaticPodHashChange(nodeName, component, previousHash string) error {
-return wait.PollImmediate(constants.APICallRetryInterval, w.timeout, func() (bool, error) {
+return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) {

 hash, err := getStaticPodSingleHash(w.client, nodeName, component)
 if err != nil {

@@ -19,7 +19,6 @@ package pkiutil
 import (
 "crypto"
 "crypto/ecdsa"
-"crypto/rand"
 cryptorand "crypto/rand"
 "crypto/rsa"
 "crypto/x509"
@@ -553,7 +552,7 @@ func NewPrivateKey() (crypto.Signer, error) {

 // NewSignedCert creates a signed certificate using the given CA certificate and key
 func NewSignedCert(cfg *certutil.Config, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer) (*x509.Certificate, error) {
-serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64))
+serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64))
 if err != nil {
 return nil, err
 }

@@ -25,7 +25,6 @@ import (
 v1 "k8s.io/api/core/v1"
 apitesting "k8s.io/apimachinery/pkg/api/apitesting"
 "k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
 genericfuzzer "k8s.io/apimachinery/pkg/apis/meta/fuzzer"
-metafuzzer "k8s.io/apimachinery/pkg/apis/meta/fuzzer"
 "k8s.io/apimachinery/pkg/runtime"
 runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
@@ -92,7 +91,6 @@ func overrideGenericFuncs(codecs runtimeserializer.CodecFactory) []interface{} {

 // FuzzerFuncs is a list of fuzzer functions
 var FuzzerFuncs = fuzzer.MergeFuzzerFuncs(
 genericfuzzer.Funcs,
-metafuzzer.Funcs,
 overrideGenericFuncs,
 corefuzzer.Funcs,
 extensionsfuzzer.Funcs,

@@ -23,7 +23,6 @@ import (
 genericvalidation "k8s.io/apimachinery/pkg/api/validation"
 metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
 "k8s.io/apimachinery/pkg/util/sets"
-"k8s.io/apimachinery/pkg/util/validation"
 utilvalidation "k8s.io/apimachinery/pkg/util/validation"
 "k8s.io/apimachinery/pkg/util/validation/field"
 "k8s.io/apiserver/pkg/util/webhook"
@@ -224,7 +223,7 @@ func validateMutatingWebhookConfiguration(e *admissionregistration.MutatingWebho
 func validateWebhook(hook *admissionregistration.Webhook, fldPath *field.Path) field.ErrorList {
 var allErrors field.ErrorList
 // hook.Name must be fully qualified
-allErrors = append(allErrors, validation.IsFullyQualifiedName(fldPath.Child("name"), hook.Name)...)
+allErrors = append(allErrors, utilvalidation.IsFullyQualifiedName(fldPath.Child("name"), hook.Name)...)

 for i, rule := range hook.Rules {
 allErrors = append(allErrors, validateRuleWithOperations(&rule, fldPath.Child("rules").Index(i))...)

@@ -26,7 +26,6 @@ import (
 utilwait "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/apiserver/pkg/admission"
 webhookinit "k8s.io/apiserver/pkg/admission/plugin/webhook/initializer"
-"k8s.io/apiserver/pkg/server"
 genericapiserver "k8s.io/apiserver/pkg/server"
 "k8s.io/apiserver/pkg/util/webhook"
 cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
@@ -45,7 +44,7 @@ type Config struct {
 }

 // New sets up the plugins and admission start hooks needed for admission
-func (c *Config) New(proxyTransport *http.Transport, serviceResolver webhook.ServiceResolver) ([]admission.PluginInitializer, server.PostStartHookFunc, error) {
+func (c *Config) New(proxyTransport *http.Transport, serviceResolver webhook.ServiceResolver) ([]admission.PluginInitializer, genericapiserver.PostStartHookFunc, error) {
 webhookAuthResolverWrapper := webhook.NewDefaultAuthenticationInfoResolverWrapper(proxyTransport, c.LoopbackClientConfig)
 webhookPluginInitializer := webhookinit.NewPluginInitializer(webhookAuthResolverWrapper, serviceResolver)

@@ -21,7 +21,6 @@ import (

 rbac "k8s.io/api/rbac/v1"
 "k8s.io/apimachinery/pkg/api/equality"
-"k8s.io/apimachinery/pkg/apis/meta/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/cli-runtime/pkg/genericclioptions"
@@ -51,8 +50,8 @@ func TestCreateClusterRole(t *testing.T) {
 verbs: "get,watch,list",
 resources: "pods,pods",
 expectedClusterRole: &rbac.ClusterRole{
-TypeMeta: v1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
-ObjectMeta: v1.ObjectMeta{
+TypeMeta: metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
+ObjectMeta: metav1.ObjectMeta{
 Name: clusterRoleName,
 },
 Rules: []rbac.PolicyRule{
@@ -69,8 +68,8 @@ func TestCreateClusterRole(t *testing.T) {
 verbs: "get,watch,list",
 resources: "pods,deployments.extensions",
 expectedClusterRole: &rbac.ClusterRole{
-TypeMeta: v1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
-ObjectMeta: v1.ObjectMeta{
+TypeMeta: metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
+ObjectMeta: metav1.ObjectMeta{
 Name: clusterRoleName,
 },
 Rules: []rbac.PolicyRule{
@@ -93,8 +92,8 @@ func TestCreateClusterRole(t *testing.T) {
 verbs: "get",
 nonResourceURL: "/logs/,/healthz",
 expectedClusterRole: &rbac.ClusterRole{
-TypeMeta: v1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
-ObjectMeta: v1.ObjectMeta{
+TypeMeta: metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
+ObjectMeta: metav1.ObjectMeta{
 Name: clusterRoleName,
 },
 Rules: []rbac.PolicyRule{
@@ -110,8 +109,8 @@ func TestCreateClusterRole(t *testing.T) {
 nonResourceURL: "/logs/,/healthz",
 resources: "pods",
 expectedClusterRole: &rbac.ClusterRole{
-TypeMeta: v1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
-ObjectMeta: v1.ObjectMeta{
+TypeMeta: metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
+ObjectMeta: metav1.ObjectMeta{
 Name: clusterRoleName,
 },
 Rules: []rbac.PolicyRule{
@@ -131,8 +130,8 @@ func TestCreateClusterRole(t *testing.T) {
 "test-aggregation-rules": {
 aggregationRule: "foo1=foo2,foo3=foo4",
 expectedClusterRole: &rbac.ClusterRole{
-TypeMeta: v1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
-ObjectMeta: v1.ObjectMeta{
+TypeMeta: metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
+ObjectMeta: metav1.ObjectMeta{
 Name: clusterRoleName,
 },
 AggregationRule: &rbac.AggregationRule{

@@ -24,7 +24,6 @@ import (
 "github.com/spf13/cobra"
 "k8s.io/klog"

-"k8s.io/api/core/v1"
 corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -296,7 +295,7 @@ func (o *RollingUpdateOptions) Run() error {
 return fmt.Errorf("%s contains a %v not a ReplicationController", filename, infos[0].Object.GetObjectKind().GroupVersionKind())
 }
 switch t := uncastVersionedObj.(type) {
-case *v1.ReplicationController:
+case *corev1.ReplicationController:
 replicasDefaulted = t.Spec.Replicas == nil
 newRc = t
 }
@@ -310,7 +309,7 @@ func (o *RollingUpdateOptions) Run() error {
 // than the old rc. This selector is the hash of the rc, with a suffix to provide uniqueness for
 // same-image updates.
 if len(o.Image) != 0 {
-codec := scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion)
+codec := scheme.Codecs.LegacyCodec(corev1.SchemeGroupVersion)
 newName := o.FindNewName(oldRc)
 if newRc, err = kubectl.LoadExistingNextReplicationController(coreClient, o.Namespace, newName); err != nil {
 return err

@@ -28,7 +28,6 @@ import (
 appsv1 "k8s.io/api/apps/v1"
 autoscalingv1 "k8s.io/api/autoscaling/v1"
 autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
-"k8s.io/api/core/v1"
 corev1 "k8s.io/api/core/v1"
 networkingv1 "k8s.io/api/networking/v1"
 policyv1beta1 "k8s.io/api/policy/v1beta1"
@@ -1506,9 +1505,9 @@ func TestDescribeDeployment(t *testing.T) {
 Spec: appsv1.DeploymentSpec{
 Replicas: utilpointer.Int32Ptr(1),
 Selector: &metav1.LabelSelector{},
-Template: v1.PodTemplateSpec{
-Spec: v1.PodSpec{
-Containers: []v1.Container{
+Template: corev1.PodTemplateSpec{
+Spec: corev1.PodSpec{
+Containers: []corev1.Container{
 {Image: "mytest-image:latest"},
 },
 },

@@ -23,7 +23,6 @@ import (
 "sort"
 "time"

-"k8s.io/api/core/v1"
 corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/cli-runtime/pkg/genericclioptions"
@@ -99,7 +98,7 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt
 return nil, fmt.Errorf("cannot get the logs from %T: %v", object, err)
 }

-sortBy := func(pods []*v1.Pod) sort.Interface { return podutils.ByLogging(pods) }
+sortBy := func(pods []*corev1.Pod) sort.Interface { return podutils.ByLogging(pods) }
 pod, numPods, err := GetFirstPod(clientset, namespace, selector.String(), timeout, sortBy)
 if err != nil {
 return nil, err

@@ -26,7 +26,6 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/apimachinery/pkg/runtime"
-kuberuntime "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/watch"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/tools/cache"
@@ -221,7 +220,7 @@ func (r *remoteConfigMap) Informer(client clientset.Interface, handler cache.Res
 resyncPeriod := time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor)

 lw := &cache.ListWatch{
-ListFunc: func(options metav1.ListOptions) (kuberuntime.Object, error) {
+ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 return client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).List(metav1.ListOptions{
 FieldSelector: fieldselector.String(),
 })

@@ -25,7 +25,6 @@ import (
 "k8s.io/kubernetes/pkg/api/legacyscheme"
 "k8s.io/kubernetes/pkg/api/pod"
 api "k8s.io/kubernetes/pkg/apis/core"
-"k8s.io/kubernetes/pkg/apis/core/validation"
 corevalidation "k8s.io/kubernetes/pkg/apis/core/validation"
 )

@@ -54,7 +53,7 @@ func (podTemplateStrategy) PrepareForCreate(ctx context.Context, obj runtime.Obj
 // Validate validates a new pod template.
 func (podTemplateStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
 template := obj.(*api.PodTemplate)
-allErrs := validation.ValidatePodTemplate(template)
+allErrs := corevalidation.ValidatePodTemplate(template)
 allErrs = append(allErrs, corevalidation.ValidateConditionalPodTemplate(&template.Template, nil, field.NewPath("template"))...)
 return allErrs
 }
@@ -80,7 +79,7 @@ func (podTemplateStrategy) PrepareForUpdate(ctx context.Context, obj, old runtim
 func (podTemplateStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
 template := obj.(*api.PodTemplate)
 oldTemplate := old.(*api.PodTemplate)
-allErrs := validation.ValidatePodTemplateUpdate(template, oldTemplate)
+allErrs := corevalidation.ValidatePodTemplateUpdate(template, oldTemplate)
 allErrs = append(allErrs, corevalidation.ValidateConditionalPodTemplate(&template.Template, &oldTemplate.Template, field.NewPath("template"))...)
 return allErrs
 }

@@ -20,7 +20,6 @@ import (
 "fmt"
 "time"

-"k8s.io/api/core/v1"
 corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -55,7 +54,7 @@ const numRepairsBeforeLeakCleanup = 3
 func NewRepair(interval time.Duration, serviceClient corev1client.ServicesGetter, eventClient corev1client.EventsGetter, portRange net.PortRange, alloc rangeallocation.RangeRegistry) *Repair {
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: eventClient.Events("")})
-recorder := eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "portallocator-repair-controller"})
+recorder := eventBroadcaster.NewRecorder(legacyscheme.Scheme, corev1.EventSource{Component: "portallocator-repair-controller"})

 return &Repair{
 interval: interval,
@@ -139,24 +138,24 @@ func (c *Repair) runOnce() error {
 stored.Release(port)
 } else {
 // doesn't seem to be allocated
-c.recorder.Eventf(svc, v1.EventTypeWarning, "PortNotAllocated", "Port %d is not allocated; repairing", port)
+c.recorder.Eventf(svc, corev1.EventTypeWarning, "PortNotAllocated", "Port %d is not allocated; repairing", port)
 runtime.HandleError(fmt.Errorf("the node port %d for service %s/%s is not allocated; repairing", port, svc.Name, svc.Namespace))
 }
 delete(c.leaks, port) // it is used, so it can't be leaked
 case portallocator.ErrAllocated:
 // port is duplicate, reallocate
-c.recorder.Eventf(svc, v1.EventTypeWarning, "PortAlreadyAllocated", "Port %d was assigned to multiple services; please recreate service", port)
+c.recorder.Eventf(svc, corev1.EventTypeWarning, "PortAlreadyAllocated", "Port %d was assigned to multiple services; please recreate service", port)
 runtime.HandleError(fmt.Errorf("the node port %d for service %s/%s was assigned to multiple services; please recreate", port, svc.Name, svc.Namespace))
 case err.(*portallocator.ErrNotInRange):
 // port is out of range, reallocate
-c.recorder.Eventf(svc, v1.EventTypeWarning, "PortOutOfRange", "Port %d is not within the port range %s; please recreate service", port, c.portRange)
+c.recorder.Eventf(svc, corev1.EventTypeWarning, "PortOutOfRange", "Port %d is not within the port range %s; please recreate service", port, c.portRange)
 runtime.HandleError(fmt.Errorf("the port %d for service %s/%s is not within the port range %s; please recreate", port, svc.Name, svc.Namespace, c.portRange))
 case portallocator.ErrFull:
 // somehow we are out of ports
-c.recorder.Eventf(svc, v1.EventTypeWarning, "PortRangeFull", "Port range %s is full; you must widen the port range in order to create new services", c.portRange)
+c.recorder.Eventf(svc, corev1.EventTypeWarning, "PortRangeFull", "Port range %s is full; you must widen the port range in order to create new services", c.portRange)
 return fmt.Errorf("the port range %s is full; you must widen the port range in order to create new services", c.portRange)
 default:
-c.recorder.Eventf(svc, v1.EventTypeWarning, "UnknownError", "Unable to allocate port %d due to an unknown error", port)
+c.recorder.Eventf(svc, corev1.EventTypeWarning, "UnknownError", "Unable to allocate port %d due to an unknown error", port)
 return fmt.Errorf("unable to allocate port %d for service %s/%s due to an unknown error, exiting: %v", port, svc.Name, svc.Namespace, err)
 }
 }

@@ -23,7 +23,6 @@ import (

 "k8s.io/klog"

-"k8s.io/api/core/v1"
 corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -276,7 +275,7 @@ func extractNSTolerations(ns *corev1.Namespace, key string) ([]api.Toleration, e
 return []api.Toleration{}, nil
 }

-var v1Tolerations []v1.Toleration
+var v1Tolerations []corev1.Toleration
 err := json.Unmarshal([]byte(ns.Annotations[key]), &v1Tolerations)
 if err != nil {
 return nil, err

@@ -25,7 +25,6 @@ import (
 genericvalidation "k8s.io/apimachinery/pkg/api/validation"
 "k8s.io/apimachinery/pkg/util/sets"
 utilvalidation "k8s.io/apimachinery/pkg/util/validation"
-validationutil "k8s.io/apimachinery/pkg/util/validation"
 "k8s.io/apimachinery/pkg/util/validation/field"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/apiserver/pkg/util/webhook"
@@ -123,7 +122,7 @@ func validateCustomResourceDefinitionSpec(spec *apiextensions.CustomResourceDefi

 if len(spec.Group) == 0 {
 allErrs = append(allErrs, field.Required(fldPath.Child("group"), ""))
-} else if errs := validationutil.IsDNS1123Subdomain(spec.Group); len(errs) > 0 {
+} else if errs := utilvalidation.IsDNS1123Subdomain(spec.Group); len(errs) > 0 {
 allErrs = append(allErrs, field.Invalid(fldPath.Child("group"), spec.Group, strings.Join(errs, ",")))
 } else if len(strings.Split(spec.Group, ".")) < 2 {
 allErrs = append(allErrs, field.Invalid(fldPath.Child("group"), spec.Group, "should be a domain with at least one dot"))
@@ -143,7 +142,7 @@ func validateCustomResourceDefinitionSpec(spec *apiextensions.CustomResourceDefi
 } else {
 versionsMap[version.Name] = true
 }
-if errs := validationutil.IsDNS1035Label(version.Name); len(errs) > 0 {
+if errs := utilvalidation.IsDNS1035Label(version.Name); len(errs) > 0 {
 allErrs = append(allErrs, field.Invalid(fldPath.Child("versions").Index(i).Child("name"), spec.Versions[i].Name, strings.Join(errs, ",")))
 }
 subresources := getSubresourcesForVersion(spec, version.Name)
@@ -179,7 +178,7 @@ func validateCustomResourceDefinitionSpec(spec *apiextensions.CustomResourceDefi
 allErrs = append(allErrs, field.Invalid(fldPath.Child("versions"), spec.Versions, "must have exactly one version marked as storage version"))
 }
 if len(spec.Version) != 0 {
-if errs := validationutil.IsDNS1035Label(spec.Version); len(errs) > 0 {
+if errs := utilvalidation.IsDNS1035Label(spec.Version); len(errs) > 0 {
 allErrs = append(allErrs, field.Invalid(fldPath.Child("version"), spec.Version, strings.Join(errs, ",")))
 }
 if len(spec.Versions) >= 1 && spec.Versions[0].Name != spec.Version {
@@ -466,21 +465,21 @@ func ValidateCustomResourceDefinitionStatus(status *apiextensions.CustomResource
 // ValidateCustomResourceDefinitionNames statically validates
 func ValidateCustomResourceDefinitionNames(names *apiextensions.CustomResourceDefinitionNames, fldPath *field.Path) field.ErrorList {
 allErrs := field.ErrorList{}
-if errs := validationutil.IsDNS1035Label(names.Plural); len(names.Plural) > 0 && len(errs) > 0 {
+if errs := utilvalidation.IsDNS1035Label(names.Plural); len(names.Plural) > 0 && len(errs) > 0 {
 allErrs = append(allErrs, field.Invalid(fldPath.Child("plural"), names.Plural, strings.Join(errs, ",")))
 }
-if errs := validationutil.IsDNS1035Label(names.Singular); len(names.Singular) > 0 && len(errs) > 0 {
+if errs := utilvalidation.IsDNS1035Label(names.Singular); len(names.Singular) > 0 && len(errs) > 0 {
 allErrs = append(allErrs, field.Invalid(fldPath.Child("singular"), names.Singular, strings.Join(errs, ",")))
 }
-if errs := validationutil.IsDNS1035Label(strings.ToLower(names.Kind)); len(names.Kind) > 0 && len(errs) > 0 {
+if errs := utilvalidation.IsDNS1035Label(strings.ToLower(names.Kind)); len(names.Kind) > 0 && len(errs) > 0 {
 allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), names.Kind, "may have mixed case, but should otherwise match: "+strings.Join(errs, ",")))
 }
-if errs := validationutil.IsDNS1035Label(strings.ToLower(names.ListKind)); len(names.ListKind) > 0 && len(errs) > 0 {
+if errs := utilvalidation.IsDNS1035Label(strings.ToLower(names.ListKind)); len(names.ListKind) > 0 && len(errs) > 0 {
 allErrs = append(allErrs, field.Invalid(fldPath.Child("listKind"), names.ListKind, "may have mixed case, but should otherwise match: "+strings.Join(errs, ",")))
 }

 for i, shortName := range names.ShortNames {
-if errs := validationutil.IsDNS1035Label(shortName); len(errs) > 0 {
+if errs := utilvalidation.IsDNS1035Label(shortName); len(errs) > 0 {
 allErrs = append(allErrs, field.Invalid(fldPath.Child("shortNames").Index(i), shortName, strings.Join(errs, ",")))
 }
 }
@@ -491,7 +490,7 @@ func ValidateCustomResourceDefinitionNames(names *apiextensions.CustomResourceDe
 }

 for i, category := range names.Categories {
-if errs := validationutil.IsDNS1035Label(category); len(errs) > 0 {
+if errs := utilvalidation.IsDNS1035Label(category); len(errs) > 0 {
 allErrs = append(allErrs, field.Invalid(fldPath.Child("categories").Index(i), category, strings.Join(errs, ",")))
 }
 }

@@ -27,7 +27,6 @@ import (
 _ "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset"
 _ "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions"
-_ "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion"
 internalinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion"
 "k8s.io/apiextensions-apiserver/pkg/controller/establish"
 "k8s.io/apiextensions-apiserver/pkg/controller/finalizer"

@@ -31,7 +31,6 @@ import (
 "testing"
 "time"

-"github.com/go-openapi/spec"
 openapi "github.com/go-openapi/spec"
 "github.com/stretchr/testify/assert"

@@ -362,7 +361,7 @@ func TestUpdateOpenAPISpec(t *testing.T) {

 // verify we are able to update the served spec using the exposed service
 newSpec := []byte(`{"swagger":"2.0","info":{"title":"Test Updated Generic API Server Swagger","version":"v0.1.0"},"paths":null}`)
-swagger := new(spec.Swagger)
+swagger := new(openapi.Swagger)
 err = json.Unmarshal(newSpec, swagger)
 assert.NoError(err)

@@ -18,7 +18,6 @@ package options

 import (
 "fmt"
-"k8s.io/apiserver/pkg/features"
 "net"
 "time"

@@ -28,7 +27,7 @@ import (
 utilfeature "k8s.io/apiserver/pkg/util/feature"

 // add the generic feature gates
-_ "k8s.io/apiserver/pkg/features"
+"k8s.io/apiserver/pkg/features"

 "github.com/spf13/pflag"
 )

@@ -43,17 +43,16 @@ import (
 "k8s.io/apimachinery/pkg/runtime/serializer"
 "k8s.io/apimachinery/pkg/version"
 "k8s.io/apiserver/pkg/server"
-. "k8s.io/apiserver/pkg/server"
 "k8s.io/client-go/discovery"
 restclient "k8s.io/client-go/rest"
 cliflag "k8s.io/component-base/cli/flag"
 )

-func setUp(t *testing.T) Config {
+func setUp(t *testing.T) server.Config {
 scheme := runtime.NewScheme()
 codecs := serializer.NewCodecFactory(scheme)

-config := NewConfig(codecs)
+config := server.NewConfig(codecs)

 return *config
 }
@@ -211,7 +210,7 @@ func TestGetNamedCertificateMap(t *testing.T) {

 NextTest:
 for i, test := range tests {
-var namedTLSCerts []NamedTLSCert
+var namedTLSCerts []server.NamedTLSCert
 bySignature := map[string]int{} // index in test.certs by cert signature
 for j, c := range test.certs {
 cert, err := createTestTLSCerts(c.TestCertSpec)
@@ -220,7 +219,7 @@ NextTest:
 continue NextTest
 }

-namedTLSCerts = append(namedTLSCerts, NamedTLSCert{
+namedTLSCerts = append(namedTLSCerts, server.NamedTLSCert{
 TLSCert: cert,
 Names: c.explicitNames,
 })
@@ -233,7 +232,7 @@ NextTest:
 bySignature[sig] = j
 }

-certMap, err := GetNamedCertificateMap(namedTLSCerts)
+certMap, err := server.GetNamedCertificateMap(namedTLSCerts)
 if err == nil && len(test.errorString) != 0 {
 t.Errorf("%d - expected no error, got: %v", i, err)
 } else if err != nil && err.Error() != test.errorString {
@@ -496,7 +495,7 @@ func TestServerRunWithSNI(t *testing.T) {

 // add poststart hook to know when the server is up.
 startedCh := make(chan struct{})
-s.AddPostStartHookOrDie("test-notifier", func(context PostStartHookContext) error {
+s.AddPostStartHookOrDie("test-notifier", func(context server.PostStartHookContext) error {
 close(startedCh)
 return nil
 })

@@ -23,7 +23,6 @@ import (
 "unicode/utf8"

 "github.com/pkg/errors"
-"k8s.io/api/core/v1"
 corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/validation"
 "k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv"
@@ -96,7 +95,7 @@ func (f *ConfigMapFactory) MakeConfigMap(

 // addKvToConfigMap adds the given key and data to the given config map.
 // Error if key invalid, or already exists.
-func addKvToConfigMap(configMap *v1.ConfigMap, keyName, data string) error {
+func addKvToConfigMap(configMap *corev1.ConfigMap, keyName, data string) error {
 // Note, the rules for ConfigMap keys are the exact same as the ones for SecretKeys.
 if errs := validation.IsConfigMapKey(keyName); len(errs) != 0 {
 return fmt.Errorf("%q is not a valid key name for a ConfigMap: %s", keyName, strings.Join(errs, ";"))

@@ -30,7 +30,6 @@ import (
 "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
 "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
 "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock"
-ga "google.golang.org/api/compute/v1"
 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/client-go/tools/record"
@@ -635,7 +634,7 @@ func TestFirewallNeedsUpdate(t *testing.T) {
 ports []v1.ServicePort
 ipnet utilnet.IPNetSet
 fwIPProtocol string
-getHook func(context.Context, *meta.Key, *cloud.MockFirewalls) (bool, *ga.Firewall, error)
+getHook func(context.Context, *meta.Key, *cloud.MockFirewalls) (bool, *compute.Firewall, error)
 sourceRange string
 exists bool
 needsUpdate bool
@@ -1073,7 +1072,7 @@ func TestExternalLoadBalancerEnsureHttpHealthCheck(t *testing.T) {
 gce, err := fakeGCECloud(DefaultTestClusterValues())
 require.NoError(t, err)
 c := gce.c.(*cloud.MockGCE)
-c.MockHttpHealthChecks.UpdateHook = func(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck, m *cloud.MockHttpHealthChecks) error {
+c.MockHttpHealthChecks.UpdateHook = func(ctx context.Context, key *meta.Key, obj *compute.HttpHealthCheck, m *cloud.MockHttpHealthChecks) error {
 m.Objects[*key] = &cloud.MockHttpHealthChecksObj{Obj: obj}
 return nil
 }

@@ -25,7 +25,6 @@ import (
 "time"

 v1 "k8s.io/api/core/v1"
-storage "k8s.io/api/storage/v1"
 storagev1 "k8s.io/api/storage/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
@@ -77,7 +76,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 testCleanups []cleanupFuncs
 pods []*v1.Pod
 pvcs []*v1.PersistentVolumeClaim
-sc map[string]*storage.StorageClass
+sc map[string]*storagev1.StorageClass
 driver testsuites.TestDriver
 nodeLabel map[string]string
 provisioner string
@@ -91,7 +90,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 init := func(tp testParameters) {
 m = mockDriverSetup{
 cs: f.ClientSet,
-sc: make(map[string]*storage.StorageClass),
+sc: make(map[string]*storagev1.StorageClass),
 tp: tp,
 }
 cs := f.ClientSet
@@ -132,7 +131,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 }

-createPod := func() (*storage.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+createPod := func() (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
 By("Creating pod")
 var sc *storagev1.StorageClass
 if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {

@@ -24,7 +24,6 @@ import (
 . "github.com/onsi/gomega"

 v1 "k8s.io/api/core/v1"
-storage "k8s.io/api/storage/v1"
 storagev1 "k8s.io/api/storage/v1"
 apierrs "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
@@ -44,7 +43,7 @@ import (
 type StorageClassTest struct {
 Client clientset.Interface
 Claim *v1.PersistentVolumeClaim
-Class *storage.StorageClass
+Class *storagev1.StorageClass
 Name string
 CloudProviders []string
 Provisioner string
@@ -89,7 +88,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 testCase *StorageClassTest
 cs clientset.Interface
 pvc *v1.PersistentVolumeClaim
-sc *storage.StorageClass
+sc *storagev1.StorageClass

 intreeOps opCounts
 migratedOps opCounts
@@ -615,7 +614,7 @@ func prepareDataSourceForProvisioning(
 client clientset.Interface,
 dynamicClient dynamic.Interface,
 initClaim *v1.PersistentVolumeClaim,
-class *storage.StorageClass,
+class *storagev1.StorageClass,
 snapshotClass *unstructured.Unstructured,
 ) (*v1.TypedLocalObjectReference, func()) {
 var err error

@@ -48,7 +48,6 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/remote"
 "k8s.io/kubernetes/pkg/kubelet/util"
 "k8s.io/kubernetes/test/e2e/framework"
-"k8s.io/kubernetes/test/e2e/framework/metrics"
 frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 imageutils "k8s.io/kubernetes/test/utils/image"

@@ -353,7 +352,7 @@ func logKubeletLatencyMetrics(metricNames ...string) {
 for _, key := range metricNames {
 metricSet.Insert(kubeletmetrics.KubeletSubsystem + "_" + key)
 }
-metric, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
+metric, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
 if err != nil {
 framework.Logf("Error getting kubelet metrics: %v", err)
 } else {
@@ -364,12 +363,12 @@ func logKubeletLatencyMetrics(metricNames ...string) {
 // returns config related metrics from the local kubelet, filtered to the filterMetricNames passed in
 func getKubeletMetrics(filterMetricNames sets.String) (frameworkmetrics.KubeletMetrics, error) {
 // grab Kubelet metrics
-ms, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
+ms, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
 if err != nil {
 return nil, err
 }

-filtered := metrics.NewKubeletMetrics()
+filtered := frameworkmetrics.NewKubeletMetrics()
 for name := range ms {
 if !filterMetricNames.Has(name) {
 continue

@@ -23,7 +23,6 @@ import (
 "testing"
 "time"

-"k8s.io/api/core/v1"
 corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
@@ -83,8 +82,8 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
 namespaceObject := framework.CreateTestingNamespace("retry-watch", s, t)
 defer framework.DeleteTestingNamespace(namespaceObject, s, t)

-getListFunc := func(c *kubernetes.Clientset, secret *v1.Secret) func(options metav1.ListOptions) *v1.SecretList {
-return func(options metav1.ListOptions) *v1.SecretList {
+getListFunc := func(c *kubernetes.Clientset, secret *corev1.Secret) func(options metav1.ListOptions) *corev1.SecretList {
+return func(options metav1.ListOptions) *corev1.SecretList {
 options.FieldSelector = fields.OneTermEqualSelector("metadata.name", secret.Name).String()
 res, err := c.CoreV1().Secrets(secret.Namespace).List(options)
 if err != nil {
@@ -94,7 +93,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
 }
 }

-getWatchFunc := func(c *kubernetes.Clientset, secret *v1.Secret) func(options metav1.ListOptions) (watch.Interface, error) {
+getWatchFunc := func(c *kubernetes.Clientset, secret *corev1.Secret) func(options metav1.ListOptions) (watch.Interface, error) {
 return func(options metav1.ListOptions) (watch.Interface, error) {
 options.FieldSelector = fields.OneTermEqualSelector("metadata.name", secret.Name).String()
 res, err := c.CoreV1().Secrets(secret.Namespace).Watch(options)
@@ -105,7 +104,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
 }
 }

-generateEvents := func(t *testing.T, c *kubernetes.Clientset, secret *v1.Secret, referenceOutput *[]string, stopChan chan struct{}, stoppedChan chan struct{}) {
+generateEvents := func(t *testing.T, c *kubernetes.Clientset, secret *corev1.Secret, referenceOutput *[]string, stopChan chan struct{}, stoppedChan chan struct{}) {
 defer close(stoppedChan)
 counter := 0

@@ -137,8 +136,8 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
 }

 initialCount := "0"
-newTestSecret := func(name string) *v1.Secret {
-return &v1.Secret{
+newTestSecret := func(name string) *corev1.Secret {
+return &corev1.Secret{
 ObjectMeta: metav1.ObjectMeta{
 Name: name,
 Namespace: namespaceObject.Name,
@@ -155,15 +154,15 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
 tt := []struct {
 name string
 succeed bool
-secret *v1.Secret
-getWatcher func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error, func())
+secret *corev1.Secret
+getWatcher func(c *kubernetes.Clientset, secret *corev1.Secret) (watch.Interface, error, func())
 normalizeOutputFunc func(referenceOutput []string) []string
 }{
 {
 name: "regular watcher should fail",
 succeed: false,
 secret: newTestSecret("secret-01"),
-getWatcher: func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error, func()) {
+getWatcher: func(c *kubernetes.Clientset, secret *corev1.Secret) (watch.Interface, error, func()) {
 options := metav1.ListOptions{
 ResourceVersion: secret.ResourceVersion,
 }
@@ -176,7 +175,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
 name: "RetryWatcher survives closed watches",
 succeed: true,
 secret: newTestSecret("secret-02"),
-getWatcher: func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error, func()) {
+getWatcher: func(c *kubernetes.Clientset, secret *corev1.Secret) (watch.Interface, error, func()) {
 lw := &cache.ListWatch{
 WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 return getWatchFunc(c, secret)(options)
@@ -191,7 +190,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
 name: "InformerWatcher survives closed watches",
 succeed: true,
 secret: newTestSecret("secret-03"),
-getWatcher: func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error, func()) {
+getWatcher: func(c *kubernetes.Clientset, secret *corev1.Secret) (watch.Interface, error, func()) {
 lw := &cache.ListWatch{
 ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 return getListFunc(c, secret)(options), nil
@@ -200,7 +199,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
 return getWatchFunc(c, secret)(options)
 },
 }
-_, _, w, done := watchtools.NewIndexerInformerWatcher(lw, &v1.Secret{})
+_, _, w, done := watchtools.NewIndexerInformerWatcher(lw, &corev1.Secret{})
 return w, nil, func() { <-done }
 },
 normalizeOutputFunc: normalizeInformerOutputFunc(initialCount),
@@ -237,7 +236,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
 ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
 defer cancel()
 _, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
-s, ok := event.Object.(*v1.Secret)
+s, ok := event.Object.(*corev1.Secret)
 if !ok {
 t.Fatalf("Received an object that is not a Secret: %#v", event.Object)
 }

@@ -34,7 +34,6 @@ import (
 admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
 appsv1beta1 "k8s.io/api/apps/v1beta1"
 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 policyv1beta1 "k8s.io/api/policy/v1beta1"
 "k8s.io/apimachinery/pkg/api/errors"
@@ -46,7 +45,6 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 dynamic "k8s.io/client-go/dynamic"
-"k8s.io/client-go/kubernetes"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/util/retry"
 "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
@@ -66,7 +64,7 @@ type testContext struct {
 admissionHolder *holder

 client dynamic.Interface
-clientset kubernetes.Interface
+clientset clientset.Interface
 verb string
 gvr schema.GroupVersionResource
 resource metav1.APIResource
@@ -338,7 +336,7 @@ func TestWebhookV1beta1(t *testing.T) {
 })
 defer master.Cleanup()

-if _, err := master.Client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil {
+if _, err := master.Client.CoreV1().Namespaces().Create(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil {
 t.Fatal(err)
 }
 if err := createV1beta1MutationWebhook(master.Client, webhookServer.URL+"/"+mutation); err != nil {

@@ -38,7 +38,6 @@ import (
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 clientset "k8s.io/client-go/kubernetes"
-externalclientset "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/kubernetes/scheme"
 kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 "k8s.io/kubernetes/pkg/features"
@@ -187,7 +186,7 @@ func TestNodeAuthorizer(t *testing.T) {
 return err
 }
 }
-getVolumeAttachment := func(client externalclientset.Interface) func() error {
+getVolumeAttachment := func(client clientset.Interface) func() error {
 return func() error {
 _, err := client.StorageV1().VolumeAttachments().Get("myattachment", metav1.GetOptions{})
 return err
@@ -398,13 +397,13 @@ func TestNodeAuthorizer(t *testing.T) {
 }
 }

-getNode1CSINode := func(client externalclientset.Interface) func() error {
+getNode1CSINode := func(client clientset.Interface) func() error {
 return func() error {
 _, err := client.StorageV1beta1().CSINodes().Get("node1", metav1.GetOptions{})
 return err
 }
 }
-createNode1CSINode := func(client externalclientset.Interface) func() error {
+createNode1CSINode := func(client clientset.Interface) func() error {
 return func() error {
 nodeInfo := &storagev1beta1.CSINode{
 ObjectMeta: metav1.ObjectMeta{
@@ -424,7 +423,7 @@ func TestNodeAuthorizer(t *testing.T) {
 return err
 }
 }
-updateNode1CSINode := func(client externalclientset.Interface) func() error {
+updateNode1CSINode := func(client clientset.Interface) func() error {
 return func() error {
 nodeInfo, err := client.StorageV1beta1().CSINodes().Get("node1", metav1.GetOptions{})
 if err != nil {
@@ -441,7 +440,7 @@ func TestNodeAuthorizer(t *testing.T) {
 return err
 }
 }
-patchNode1CSINode := func(client externalclientset.Interface) func() error {
+patchNode1CSINode := func(client clientset.Interface) func() error {
 return func() error {
 bs := []byte(fmt.Sprintf(`{"csiDrivers": [ { "driver": "net.example.storage.driver2", "nodeID": "net.example.storage/node1", "topologyKeys": [ "net.example.storage/region" ] } ] }`))
 // StrategicMergePatch is unsupported by CRs. Falling back to MergePatch
@@ -449,7 +448,7 @@ func TestNodeAuthorizer(t *testing.T) {
 return err
 }
 }
-deleteNode1CSINode := func(client externalclientset.Interface) func() error {
+deleteNode1CSINode := func(client clientset.Interface) func() error {
 return func() error {
 return client.StorageV1beta1().CSINodes().Delete("node1", &metav1.DeleteOptions{})
 }

@@ -44,7 +44,6 @@ import (
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 clientset "k8s.io/client-go/kubernetes"
-externalclientset "k8s.io/client-go/kubernetes"
 restclient "k8s.io/client-go/rest"
 watchtools "k8s.io/client-go/tools/watch"
 "k8s.io/client-go/transport"
@@ -74,10 +73,10 @@ func clientForToken(user string) *http.Client {
 }
 }

-func clientsetForToken(user string, config *restclient.Config) (clientset.Interface, externalclientset.Interface) {
+func clientsetForToken(user string, config *restclient.Config) (clientset.Interface, clientset.Interface) {
 configCopy := *config
 configCopy.BearerToken = user
-return clientset.NewForConfigOrDie(&configCopy), externalclientset.NewForConfigOrDie(&configCopy)
+return clientset.NewForConfigOrDie(&configCopy), clientset.NewForConfigOrDie(&configCopy)
 }

 func crdClientsetForToken(user string, config *restclient.Config) apiextensionsclient.Interface {

@@ -28,17 +28,15 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/informers"
-"k8s.io/client-go/kubernetes"
 clientset "k8s.io/client-go/kubernetes"
 clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1"
-"k8s.io/client-go/rest"
 restclient "k8s.io/client-go/rest"
 "k8s.io/kubernetes/pkg/controller/cronjob"
 "k8s.io/kubernetes/pkg/controller/job"
 "k8s.io/kubernetes/test/integration/framework"
 )

-func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *cronjob.Controller, *job.JobController, informers.SharedInformerFactory, clientset.Interface, rest.Config) {
+func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *cronjob.Controller, *job.JobController, informers.SharedInformerFactory, clientset.Interface, restclient.Config) {
 masterConfig := framework.NewIntegrationTestMasterConfig()
 _, server, closeFn := framework.RunAMaster(masterConfig)

@@ -96,7 +94,7 @@ func cleanupCronJobs(t *testing.T, cjClient clientbatchv1beta1.CronJobInterface,
 }
 }

-func validateJobAndPod(t *testing.T, clientSet kubernetes.Interface, namespace string) {
+func validateJobAndPod(t *testing.T, clientSet clientset.Interface, namespace string) {
 if err := wait.PollImmediate(1*time.Second, 120*time.Second, func() (bool, error) {
 jobs, err := clientSet.BatchV1().Jobs(namespace).List(metav1.ListOptions{})
 if err != nil {

@@ -35,8 +35,7 @@ import (
 "k8s.io/client-go/informers"
 clientset "k8s.io/client-go/kubernetes"
 appstyped "k8s.io/client-go/kubernetes/typed/apps/v1"
-clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
-corev1typed "k8s.io/client-go/kubernetes/typed/core/v1"
+corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 restclient "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/cache"
 "k8s.io/client-go/tools/record"
@@ -49,7 +48,6 @@ import (
 "k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/scheduler"
 "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
-_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "k8s.io/kubernetes/pkg/scheduler/factory"
 labelsutil "k8s.io/kubernetes/pkg/util/labels"
@@ -139,7 +137,7 @@ func setupScheduler(
 legacyscheme.Scheme,
 v1.EventSource{Component: v1.DefaultSchedulerName},
 )
-eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{
+eventBroadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{
 Interface: cs.CoreV1().Events(""),
 })

@@ -292,7 +290,7 @@ func newNode(name string, label map[string]string) *v1.Node {
 }
 }

-func addNodes(nodeClient corev1typed.NodeInterface, startIndex, numNodes int, label map[string]string, t *testing.T) {
+func addNodes(nodeClient corev1client.NodeInterface, startIndex, numNodes int, label map[string]string, t *testing.T) {
 for i := startIndex; i < startIndex+numNodes; i++ {
 _, err := nodeClient.Create(newNode(fmt.Sprintf("node-%d", i), label))
 if err != nil {
@@ -302,7 +300,7 @@ func addNodes(nodeClient corev1typed.NodeInterface, startIndex, numNodes int, la
 }

 func validateDaemonSetPodsAndMarkReady(
-podClient corev1typed.PodInterface,
+podClient corev1client.PodInterface,
 podInformer cache.SharedIndexInformer,
 numberPods int,
 t *testing.T,
@@ -447,7 +445,7 @@ func validateDaemonSetStatus(
 }
 }

-func validateFailedPlacementEvent(eventClient corev1typed.EventInterface, t *testing.T) {
+func validateFailedPlacementEvent(eventClient corev1client.EventInterface, t *testing.T) {
 if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
 eventList, err := eventClient.List(metav1.ListOptions{})
 if err != nil {

@@ -34,8 +34,6 @@ import (
 "sigs.k8s.io/yaml"

 appsv1 "k8s.io/api/apps/v1"
-"k8s.io/api/core/v1"
-api "k8s.io/api/core/v1"
 corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -637,7 +635,7 @@ func TestAccept(t *testing.T) {
 }
 }

-func countEndpoints(eps *api.Endpoints) int {
+func countEndpoints(eps *corev1.Endpoints) int {
 count := 0
 for i := range eps.Subsets {
 count += len(eps.Subsets[i].Addresses) * len(eps.Subsets[i].Ports)
@@ -693,14 +691,14 @@ func TestServiceAlloc(t *testing.T) {

 client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})

-svc := func(i int) *api.Service {
-return &api.Service{
+svc := func(i int) *corev1.Service {
+return &corev1.Service{
 ObjectMeta: metav1.ObjectMeta{
 Name: fmt.Sprintf("svc-%v", i),
 },
-Spec: api.ServiceSpec{
-Type: api.ServiceTypeClusterIP,
-Ports: []api.ServicePort{
+Spec: corev1.ServiceSpec{
+Type: corev1.ServiceTypeClusterIP,
+Ports: []corev1.ServicePort{
 {Port: 80},
 },
 },
@@ -778,7 +776,7 @@ func TestUpdateNodeObjects(t *testing.T) {

 for i := 0; i < nodes*6; i++ {
 c.Nodes().Delete(fmt.Sprintf("node-%d", i), nil)
-_, err := c.Nodes().Create(&v1.Node{
+_, err := c.Nodes().Create(&corev1.Node{
 ObjectMeta: metav1.ObjectMeta{
 Name: fmt.Sprintf("node-%d", i),
 },
@@ -811,7 +809,7 @@ func TestUpdateNodeObjects(t *testing.T) {
 i := 0
 for r := range w.ResultChan() {
 i++
-if _, ok := r.Object.(*v1.Node); !ok {
+if _, ok := r.Object.(*corev1.Node); !ok {
 fmt.Printf("[watch:%d] unexpected object after %d: %#v\n", lister, i, r)
 }
 if i%100 == 0 {
@@ -866,24 +864,24 @@ func TestUpdateNodeObjects(t *testing.T) {
 switch {
 case i%4 == 0:
 lastCount = 1
-n.Status.Conditions = []v1.NodeCondition{
+n.Status.Conditions = []corev1.NodeCondition{
 {
-Type: v1.NodeReady,
-Status: v1.ConditionTrue,
+Type: corev1.NodeReady,
+Status: corev1.ConditionTrue,
 Reason: "foo",
 },
 }
 case i%4 == 1:
 lastCount = 2
-n.Status.Conditions = []v1.NodeCondition{
+n.Status.Conditions = []corev1.NodeCondition{
 {
-Type: v1.NodeReady,
-Status: v1.ConditionFalse,
+Type: corev1.NodeReady,
+Status: corev1.ConditionFalse,
 Reason: "foo",
 },
 {
-Type: v1.NodeDiskPressure,
-Status: v1.ConditionTrue,
+Type: corev1.NodeDiskPressure,
+Status: corev1.ConditionTrue,
 Reason: "bar",
 },
 }

@@ -44,7 +44,6 @@ import (
 "k8s.io/apiserver/pkg/authentication/user"
 "k8s.io/apiserver/pkg/authorization/authorizer"
 "k8s.io/client-go/informers"
-"k8s.io/client-go/kubernetes"
 clientset "k8s.io/client-go/kubernetes"
 restclient "k8s.io/client-go/rest"
 "k8s.io/kubernetes/pkg/controller"
@@ -375,7 +374,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
 // Root client
 // TODO: remove rootClient after we refactor pkg/admission to use the clientset.
 rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})
-externalRootClientset := kubernetes.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})
+externalRootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})

 externalInformers := informers.NewSharedInformerFactory(externalRootClientset, controller.NoResyncPeriodFunc())
 informers := informers.NewSharedInformerFactory(rootClientset, controller.NoResyncPeriodFunc())

@@ -18,7 +18,6 @@ package utils

 import (
 "crypto"
-"crypto/rand"
 cryptorand "crypto/rand"
 "crypto/rsa"
 "crypto/x509"
@@ -55,7 +54,7 @@ func EncodeCertPEM(cert *x509.Certificate) []byte {

 // NewSignedCert creates a signed certificate using the given CA certificate and key
 func NewSignedCert(cfg *certutil.Config, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer) (*x509.Certificate, error) {
-serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64))
+serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64))
 if err != nil {
 return nil, err
 }