Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 20:53:33 +00:00)
kubeadm: use t.Run in selfhosting and update phases
Used the t.Run API for kubeadm tests in the app/phases/selfhosting and app/phases/update directories. This should improve testing output and make it more visible which test is doing what. (A short sketch of the pattern follows the commit metadata, before the diff.)
This commit is contained in:
parent 716b253963
commit 442098bdec
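For context, this is the table-driven subtest pattern the commit applies throughout: give every test case a name field and run each case inside t.Run. The sketch below is minimal and self-contained; the package, the double helper, and TestDouble are illustrative stand-ins, not code from kubeadm.

package example

import "testing"

// double is a stand-in for whatever function a table-driven test exercises.
func double(n int) int { return n * 2 }

func TestDouble(t *testing.T) {
	var tests = []struct {
		name     string
		in       int
		expected int
	}{
		{name: "zero", in: 0, expected: 0},
		{name: "positive", in: 3, expected: 6},
	}

	for _, rt := range tests {
		// Each case becomes a named subtest; failures are reported as
		// TestDouble/zero, TestDouble/positive, and a single case can be
		// selected with: go test -run 'TestDouble/positive'
		t.Run(rt.name, func(t *testing.T) {
			if got := double(rt.in); got != rt.expected {
				t.Errorf("expected %d, got %d", rt.expected, got)
			}
		})
	}
}

With subtests, go test -v prints a line per named case, which is what the commit message means by making it more visible which test is doing what.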
@@ -27,11 +27,13 @@ import (
 
 func TestMutatePodSpec(t *testing.T) {
     var tests = []struct {
+        name      string
         component string
         podSpec   *v1.PodSpec
         expected  v1.PodSpec
     }{
         {
+            name:      "mutate api server podspec",
             component: kubeadmconstants.KubeAPIServer,
             podSpec: &v1.PodSpec{
                 Containers: []v1.Container{
@@ -73,6 +75,7 @@ func TestMutatePodSpec(t *testing.T) {
             },
         },
         {
+            name:      "mutate controller manager podspec",
             component: kubeadmconstants.KubeControllerManager,
             podSpec:   &v1.PodSpec{},
             expected: v1.PodSpec{
@@ -86,6 +89,7 @@ func TestMutatePodSpec(t *testing.T) {
             },
         },
         {
+            name:      "mutate scheduler podspec",
             component: kubeadmconstants.KubeScheduler,
             podSpec:   &v1.PodSpec{},
             expected: v1.PodSpec{
@@ -101,20 +105,24 @@ func TestMutatePodSpec(t *testing.T) {
     }
 
     for _, rt := range tests {
-        mutatePodSpec(GetDefaultMutators(), rt.component, rt.podSpec)
+        t.Run(rt.name, func(t *testing.T) {
+            mutatePodSpec(GetDefaultMutators(), rt.component, rt.podSpec)
 
             if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
                 t.Errorf("failed mutatePodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
             }
+        })
     }
 }
 
 func TestAddNodeSelectorToPodSpec(t *testing.T) {
     var tests = []struct {
+        name     string
         podSpec  *v1.PodSpec
         expected v1.PodSpec
     }{
         {
+            name:    "empty podspec",
             podSpec: &v1.PodSpec{},
             expected: v1.PodSpec{
                 NodeSelector: map[string]string{
@@ -123,6 +131,7 @@ func TestAddNodeSelectorToPodSpec(t *testing.T) {
             },
         },
         {
+            name: "podspec with a valid node selector",
             podSpec: &v1.PodSpec{
                 NodeSelector: map[string]string{
                     "foo": "bar",
@@ -138,20 +147,24 @@ func TestAddNodeSelectorToPodSpec(t *testing.T) {
     }
 
     for _, rt := range tests {
-        addNodeSelectorToPodSpec(rt.podSpec)
+        t.Run(rt.name, func(t *testing.T) {
+            addNodeSelectorToPodSpec(rt.podSpec)
 
             if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
                 t.Errorf("failed addNodeSelectorToPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
             }
+        })
     }
 }
 
 func TestSetMasterTolerationOnPodSpec(t *testing.T) {
     var tests = []struct {
+        name     string
         podSpec  *v1.PodSpec
         expected v1.PodSpec
     }{
         {
+            name:    "empty podspec",
             podSpec: &v1.PodSpec{},
             expected: v1.PodSpec{
                 Tolerations: []v1.Toleration{
@@ -160,6 +173,7 @@ func TestSetMasterTolerationOnPodSpec(t *testing.T) {
             },
         },
         {
+            name: "podspec with a valid toleration",
             podSpec: &v1.PodSpec{
                 Tolerations: []v1.Toleration{
                     {Key: "foo", Value: "bar"},
@@ -175,26 +189,31 @@ func TestSetMasterTolerationOnPodSpec(t *testing.T) {
     }
 
     for _, rt := range tests {
-        setMasterTolerationOnPodSpec(rt.podSpec)
+        t.Run(rt.name, func(t *testing.T) {
+            setMasterTolerationOnPodSpec(rt.podSpec)
 
             if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
                 t.Errorf("failed setMasterTolerationOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
             }
+        })
     }
 }
 
 func TestSetRightDNSPolicyOnPodSpec(t *testing.T) {
     var tests = []struct {
+        name     string
         podSpec  *v1.PodSpec
         expected v1.PodSpec
     }{
         {
+            name:    "empty podspec",
             podSpec: &v1.PodSpec{},
             expected: v1.PodSpec{
                 DNSPolicy: v1.DNSClusterFirstWithHostNet,
             },
         },
         {
+            name: "podspec with a v1.DNSClusterFirst policy",
             podSpec: &v1.PodSpec{
                 DNSPolicy: v1.DNSClusterFirst,
             },
@@ -205,20 +224,24 @@ func TestSetRightDNSPolicyOnPodSpec(t *testing.T) {
     }
 
     for _, rt := range tests {
-        setRightDNSPolicyOnPodSpec(rt.podSpec)
+        t.Run(rt.name, func(t *testing.T) {
+            setRightDNSPolicyOnPodSpec(rt.podSpec)
 
             if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
                 t.Errorf("failed setRightDNSPolicyOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
             }
+        })
     }
 }
 
 func TestSetHostIPOnPodSpec(t *testing.T) {
     var tests = []struct {
+        name     string
         podSpec  *v1.PodSpec
         expected v1.PodSpec
     }{
         {
+            name: "set HOST_IP env var on a podspec",
             podSpec: &v1.PodSpec{
                 Containers: []v1.Container{
                     {
@@ -254,21 +277,25 @@ func TestSetHostIPOnPodSpec(t *testing.T) {
     }
 
     for _, rt := range tests {
-        setHostIPOnPodSpec(rt.podSpec)
+        t.Run(rt.name, func(t *testing.T) {
+            setHostIPOnPodSpec(rt.podSpec)
 
             if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
                 t.Errorf("failed setHostIPOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
             }
+        })
     }
 }
 
 func TestSetSelfHostedVolumesForAPIServer(t *testing.T) {
     hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
     var tests = []struct {
+        name     string
         podSpec  *v1.PodSpec
         expected v1.PodSpec
     }{
         {
+            name: "set selfhosted volumes for api server",
             podSpec: &v1.PodSpec{
                 Containers: []v1.Container{
                     {
@@ -346,13 +373,15 @@ func TestSetSelfHostedVolumesForAPIServer(t *testing.T) {
     }
 
     for _, rt := range tests {
-        setSelfHostedVolumesForAPIServer(rt.podSpec)
-        sort.Strings(rt.podSpec.Containers[0].Command)
-        sort.Strings(rt.expected.Containers[0].Command)
+        t.Run(rt.name, func(t *testing.T) {
+            setSelfHostedVolumesForAPIServer(rt.podSpec)
+            sort.Strings(rt.podSpec.Containers[0].Command)
+            sort.Strings(rt.expected.Containers[0].Command)
 
             if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
                 t.Errorf("failed setSelfHostedVolumesForAPIServer:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
             }
+        })
     }
 }
 
@@ -360,10 +389,12 @@ func TestSetSelfHostedVolumesForControllerManager(t *testing.T) {
     hostPathFileOrCreate := v1.HostPathFileOrCreate
     hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
     var tests = []struct {
+        name     string
         podSpec  *v1.PodSpec
         expected v1.PodSpec
     }{
         {
+            name: "set selfhosted volumes for controller mananger",
             podSpec: &v1.PodSpec{
                 Containers: []v1.Container{
                     {
@@ -464,23 +495,27 @@ func TestSetSelfHostedVolumesForControllerManager(t *testing.T) {
     }
 
     for _, rt := range tests {
-        setSelfHostedVolumesForControllerManager(rt.podSpec)
-        sort.Strings(rt.podSpec.Containers[0].Command)
-        sort.Strings(rt.expected.Containers[0].Command)
+        t.Run(rt.name, func(t *testing.T) {
+            setSelfHostedVolumesForControllerManager(rt.podSpec)
+            sort.Strings(rt.podSpec.Containers[0].Command)
+            sort.Strings(rt.expected.Containers[0].Command)
 
             if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
                 t.Errorf("failed setSelfHostedVolumesForControllerManager:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
             }
+        })
     }
 }
 
 func TestSetSelfHostedVolumesForScheduler(t *testing.T) {
     hostPathFileOrCreate := v1.HostPathFileOrCreate
     var tests = []struct {
+        name     string
         podSpec  *v1.PodSpec
         expected v1.PodSpec
     }{
         {
+            name: "set selfhosted volumes for scheduler",
             podSpec: &v1.PodSpec{
                 Containers: []v1.Container{
                     {
@@ -534,12 +569,14 @@ func TestSetSelfHostedVolumesForScheduler(t *testing.T) {
     }
 
     for _, rt := range tests {
-        setSelfHostedVolumesForScheduler(rt.podSpec)
-        sort.Strings(rt.podSpec.Containers[0].Command)
-        sort.Strings(rt.expected.Containers[0].Command)
+        t.Run(rt.name, func(t *testing.T) {
+            setSelfHostedVolumesForScheduler(rt.podSpec)
+            sort.Strings(rt.podSpec.Containers[0].Command)
+            sort.Strings(rt.expected.Containers[0].Command)
 
             if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
                 t.Errorf("failed setSelfHostedVolumesForScheduler:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
             }
+        })
     }
 }
@@ -488,41 +488,44 @@ func TestBuildDaemonSet(t *testing.T) {
     }
 
     for _, rt := range tests {
-        tempFile, err := createTempFileWithContent(rt.podBytes)
-        if err != nil {
-            t.Errorf("error creating tempfile with content:%v", err)
-        }
-        defer os.Remove(tempFile)
+        t.Run(rt.component, func(t *testing.T) {
+            tempFile, err := createTempFileWithContent(rt.podBytes)
+            if err != nil {
+                t.Errorf("error creating tempfile with content:%v", err)
+            }
+            defer os.Remove(tempFile)
 
             podSpec, err := loadPodSpecFromFile(tempFile)
             if err != nil {
                 t.Fatalf("couldn't load the specified Pod Spec")
             }
 
             ds := BuildDaemonSet(rt.component, podSpec, GetDefaultMutators())
             dsBytes, err := util.MarshalToYaml(ds, apps.SchemeGroupVersion)
             if err != nil {
                 t.Fatalf("failed to marshal daemonset to YAML: %v", err)
             }
 
             if !bytes.Equal(dsBytes, rt.dsBytes) {
                 t.Errorf("failed TestBuildDaemonSet:\nexpected:\n%s\nsaw:\n%s", rt.dsBytes, dsBytes)
             }
+        })
     }
 }
 
 func TestLoadPodSpecFromFile(t *testing.T) {
     tests := []struct {
+        name        string
         content     string
         expectError bool
     }{
         {
-            // No content
+            name:        "no content",
             content:     "",
             expectError: true,
         },
         {
-            // Good YAML
+            name: "valid YAML",
             content: `
 apiVersion: v1
 kind: Pod
@@ -535,7 +538,7 @@ spec:
             expectError: false,
         },
         {
-            // Good JSON
+            name: "valid JSON",
             content: `
 {
     "apiVersion": "v1",
@@ -554,7 +557,7 @@ spec:
             expectError: false,
         },
         {
-            // Bad PodSpec
+            name: "incorrect PodSpec",
             content: `
 apiVersion: v1
 kind: Pod
@@ -568,22 +571,26 @@ spec:
     }
 
     for _, rt := range tests {
-        tempFile, err := createTempFileWithContent([]byte(rt.content))
-        if err != nil {
-            t.Errorf("error creating tempfile with content:%v", err)
-        }
-        defer os.Remove(tempFile)
+        t.Run(rt.name, func(t *testing.T) {
+            tempFile, err := createTempFileWithContent([]byte(rt.content))
+            if err != nil {
+                t.Errorf("error creating tempfile with content:%v", err)
+            }
+            defer os.Remove(tempFile)
 
             _, err = loadPodSpecFromFile(tempFile)
             if (err != nil) != rt.expectError {
                 t.Errorf("failed TestLoadPodSpecFromFile:\nexpected error:\n%t\nsaw:\n%v", rt.expectError, err)
             }
+        })
     }
 
-    _, err := loadPodSpecFromFile("")
-    if err == nil {
-        t.Error("unexpected success: loadPodSpecFromFile should return error when no file is given")
-    }
+    t.Run("empty file name", func(t *testing.T) {
+        _, err := loadPodSpecFromFile("")
+        if err == nil {
+            t.Error("unexpected success: loadPodSpecFromFile should return error when no file is given")
+        }
+    })
 }
 
 func createTempFileWithContent(content []byte) (string, error) {
@@ -781,18 +781,21 @@ func TestGetAvailableUpgrades(t *testing.T) {
 
 func TestKubeletUpgrade(t *testing.T) {
     tests := []struct {
+        name     string
         before   map[string]uint16
         after    string
         expected bool
     }{
-        { // upgrade available
+        {
+            name: "upgrade from v1.10.1 to v1.10.3 is available",
             before: map[string]uint16{
                 "v1.10.1": 1,
             },
             after:    "v1.10.3",
             expected: true,
         },
-        { // upgrade available
+        {
+            name: "upgrade from v1.10.1 and v1.10.3/100 to v1.10.3 is available",
             before: map[string]uint16{
                 "v1.10.1": 1,
                 "v1.10.3": 100,
@@ -800,21 +803,24 @@ func TestKubeletUpgrade(t *testing.T) {
             after:    "v1.10.3",
             expected: true,
         },
-        { // upgrade not available
+        {
+            name: "upgrade from v1.10.3 to v1.10.3 is not available",
             before: map[string]uint16{
                 "v1.10.3": 1,
             },
             after:    "v1.10.3",
             expected: false,
         },
-        { // upgrade not available
+        {
+            name: "upgrade from v1.10.3/100 to v1.10.3 is not available",
             before: map[string]uint16{
                 "v1.10.3": 100,
             },
             after:    "v1.10.3",
             expected: false,
         },
-        { // upgrade not available if we don't know anything about the earlier state
+        {
+            name: "upgrade is not available if we don't know anything about the earlier state",
             before: map[string]uint16{},
             after:    "v1.10.3",
             expected: false,
@@ -822,19 +828,20 @@ func TestKubeletUpgrade(t *testing.T) {
     }
 
     for _, rt := range tests {
+        t.Run(rt.name, func(t *testing.T) {
             upgrade := Upgrade{
                 Before: ClusterState{
                     KubeletVersions: rt.before,
                 },
                 After: ClusterState{
                     KubeVersion: rt.after,
                 },
             }
             actual := upgrade.CanUpgradeKubelets()
             if actual != rt.expected {
                 t.Errorf("failed TestKubeletUpgrade\n\texpected: %t\n\tgot: %t\n\ttest object: %v", rt.expected, actual, upgrade)
             }
+        })
     }
 }
 
@@ -883,10 +890,11 @@ func TestGetBranchFromVersion(t *testing.T) {
     }
 
     for _, tc := range testCases {
-        v := getBranchFromVersion(tc.version)
-        if v != tc.expectedVersion {
-            t.Errorf("expected version %s, got %s", tc.expectedVersion, v)
-        }
+        t.Run(tc.version, func(t *testing.T) {
+            v := getBranchFromVersion(tc.version)
+            if v != tc.expectedVersion {
+                t.Errorf("expected version %s, got %s", tc.expectedVersion, v)
+            }
+        })
     }
 
 }
@@ -150,35 +150,37 @@ func TestShouldBackupAPIServerCertAndKey(t *testing.T) {
             expected: true,
         },
     } {
-        tmpdir := testutil.SetupTempDir(t)
-        defer os.RemoveAll(tmpdir)
-        cfg.CertificatesDir = tmpdir
+        t.Run(desc, func(t *testing.T) {
+            tmpdir := testutil.SetupTempDir(t)
+            defer os.RemoveAll(tmpdir)
+            cfg.CertificatesDir = tmpdir
 
-        caCert, caKey, err := certsphase.KubeadmCertRootCA.CreateAsCA(cfg)
-        if err != nil {
-            t.Fatalf("failed creation of ca cert and key: %v", err)
-        }
-        caCert.NotBefore = caCert.NotBefore.Add(-test.adjustedExpiry).UTC()
+            caCert, caKey, err := certsphase.KubeadmCertRootCA.CreateAsCA(cfg)
+            if err != nil {
+                t.Fatalf("failed creation of ca cert and key: %v", err)
+            }
+            caCert.NotBefore = caCert.NotBefore.Add(-test.adjustedExpiry).UTC()
 
-        err = certsphase.KubeadmCertAPIServer.CreateFromCA(cfg, caCert, caKey)
-        if err != nil {
-            t.Fatalf("Test %s: failed creation of cert and key: %v", desc, err)
-        }
+            err = certsphase.KubeadmCertAPIServer.CreateFromCA(cfg, caCert, caKey)
+            if err != nil {
+                t.Fatalf("Test %s: failed creation of cert and key: %v", desc, err)
+            }
 
-        certAndKey := []string{filepath.Join(tmpdir, constants.APIServerCertName), filepath.Join(tmpdir, constants.APIServerKeyName)}
-        for _, path := range certAndKey {
-            if _, err := os.Stat(path); os.IsNotExist(err) {
-                t.Fatalf("Test %s: %s not exist: %v", desc, path, err)
-            }
-        }
+            certAndKey := []string{filepath.Join(tmpdir, constants.APIServerCertName), filepath.Join(tmpdir, constants.APIServerKeyName)}
+            for _, path := range certAndKey {
+                if _, err := os.Stat(path); os.IsNotExist(err) {
+                    t.Fatalf("Test %s: %s not exist: %v", desc, path, err)
+                }
+            }
 
-        shouldBackup, err := shouldBackupAPIServerCertAndKey(tmpdir)
-        if err != nil {
-            t.Fatalf("Test %s: failed to check shouldBackupAPIServerCertAndKey: %v", desc, err)
-        }
+            shouldBackup, err := shouldBackupAPIServerCertAndKey(tmpdir)
+            if err != nil {
+                t.Fatalf("Test %s: failed to check shouldBackupAPIServerCertAndKey: %v", desc, err)
+            }
 
-        if shouldBackup != test.expected {
-            t.Fatalf("Test %s: shouldBackupAPIServerCertAndKey expected %v, got %v", desc, test.expected, shouldBackup)
-        }
+            if shouldBackup != test.expected {
+                t.Fatalf("Test %s: shouldBackupAPIServerCertAndKey expected %v, got %v", desc, test.expected, shouldBackup)
+            }
+        })
     }
 }
@@ -108,26 +108,31 @@ func (p *goodPrepuller) DeleteFunc(component string) error {
 
 func TestPrepullImagesInParallel(t *testing.T) {
     tests := []struct {
+        name        string
         p           Prepuller
         timeout     time.Duration
        expectedErr bool
     }{
-        { // should error out; create failed
+        {
+            name:        "should error out; create failed",
             p:           NewFailedCreatePrepuller(),
             timeout:     10 * time.Second,
             expectedErr: true,
         },
-        { // should error out; timeout exceeded
+        {
+            name:        "should error out; timeout exceeded",
             p:           NewForeverWaitPrepuller(),
             timeout:     10 * time.Second,
             expectedErr: true,
         },
-        { // should error out; delete failed
+        {
+            name:        "should error out; delete failed",
             p:           NewFailedDeletePrepuller(),
             timeout:     10 * time.Second,
             expectedErr: true,
         },
-        { // should work just fine
+        {
+            name:        "should work just fine",
             p:           NewGoodPrepuller(),
             timeout:     10 * time.Second,
             expectedErr: false,
@@ -135,14 +140,15 @@ func TestPrepullImagesInParallel(t *testing.T) {
     }
 
     for _, rt := range tests {
+        t.Run(rt.name, func(t *testing.T) {
             actualErr := PrepullImagesInParallel(rt.p, rt.timeout, append(constants.MasterComponents, constants.Etcd))
             if (actualErr != nil) != rt.expectedErr {
                 t.Errorf(
                     "failed TestPrepullImagesInParallel\n\texpected error: %t\n\tgot: %t",
                     rt.expectedErr,
                     (actualErr != nil),
                 )
             }
+        })
     }
 }
@@ -412,116 +412,117 @@ func TestStaticPodControlPlane(t *testing.T) {
     }
 
     for _, rt := range tests {
+        t.Run(rt.description, func(t *testing.T) {
             waiter := NewFakeStaticPodWaiter(rt.waitErrsToReturn)
             pathMgr, err := NewFakeStaticPodPathManager(rt.moveFileFunc)
             if err != nil {
                 t.Fatalf("couldn't run NewFakeStaticPodPathManager: %v", err)
             }
             defer os.RemoveAll(pathMgr.(*fakeStaticPodPathManager).KubernetesDir())
             constants.KubernetesDir = pathMgr.(*fakeStaticPodPathManager).KubernetesDir()
 
             tempCertsDir, err := ioutil.TempDir("", "kubeadm-certs")
             if err != nil {
                 t.Fatalf("couldn't create temporary certificates directory: %v", err)
             }
             defer os.RemoveAll(tempCertsDir)
             tmpEtcdDataDir, err := ioutil.TempDir("", "kubeadm-etcd-data")
             if err != nil {
                 t.Fatalf("couldn't create temporary etcd data directory: %v", err)
             }
             defer os.RemoveAll(tmpEtcdDataDir)
 
             oldcfg, err := getConfig(constants.MinimumControlPlaneVersion.String(), tempCertsDir, tmpEtcdDataDir)
             if err != nil {
                 t.Fatalf("couldn't create config: %v", err)
             }
 
             tree, err := certsphase.GetCertsWithoutEtcd().AsMap().CertTree()
             if err != nil {
                 t.Fatalf("couldn't get cert tree: %v", err)
             }
 
             if err := tree.CreateTree(oldcfg); err != nil {
                 t.Fatalf("couldn't get create cert tree: %v", err)
             }
 
             t.Logf("Wrote certs to %s\n", oldcfg.CertificatesDir)
 
             // Initialize the directory with v1.7 manifests; should then be upgraded to v1.8 using the method
             err = controlplanephase.CreateInitStaticPodManifestFiles(pathMgr.RealManifestDir(), oldcfg)
             if err != nil {
                 t.Fatalf("couldn't run CreateInitStaticPodManifestFiles: %v", err)
             }
             err = etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.RealManifestDir(), oldcfg)
             if err != nil {
                 t.Fatalf("couldn't run CreateLocalEtcdStaticPodManifestFile: %v", err)
             }
             // Get a hash of the v1.7 API server manifest to compare later (was the file re-written)
             oldHash, err := getAPIServerHash(pathMgr.RealManifestDir())
             if err != nil {
                 t.Fatalf("couldn't read temp file: %v", err)
             }
 
             newcfg, err := getConfig(constants.CurrentKubernetesVersion.String(), tempCertsDir, tmpEtcdDataDir)
             if err != nil {
                 t.Fatalf("couldn't create config: %v", err)
             }
 
             // create the kubeadm etcd certs
             caCert, caKey, err := certsphase.KubeadmCertEtcdCA.CreateAsCA(newcfg)
             if err != nil {
                 t.Fatalf("couldn't create new CA certificate: %v", err)
             }
             for _, cert := range []*certsphase.KubeadmCert{
                 &certsphase.KubeadmCertEtcdServer,
                 &certsphase.KubeadmCertEtcdPeer,
                 &certsphase.KubeadmCertEtcdHealthcheck,
                 &certsphase.KubeadmCertEtcdAPIClient,
             } {
                 if err := cert.CreateFromCA(newcfg, caCert, caKey); err != nil {
                     t.Fatalf("couldn't create certificate %s: %v", cert.Name, err)
                 }
             }
 
             actualErr := StaticPodControlPlane(
                 nil,
                 waiter,
                 pathMgr,
                 newcfg,
                 true,
                 fakeTLSEtcdClient{
                     TLS: false,
                 },
                 fakePodManifestEtcdClient{
                     ManifestDir:     pathMgr.RealManifestDir(),
                     CertificatesDir: newcfg.CertificatesDir,
                 },
             )
             if (actualErr != nil) != rt.expectedErr {
                 t.Errorf(
                     "failed UpgradeStaticPodControlPlane\n%s\n\texpected error: %t\n\tgot: %t\n\tactual error: %v",
                     rt.description,
                     rt.expectedErr,
                     (actualErr != nil),
                     actualErr,
                 )
             }
 
             newHash, err := getAPIServerHash(pathMgr.RealManifestDir())
             if err != nil {
                 t.Fatalf("couldn't read temp file: %v", err)
             }
 
             if (oldHash != newHash) != rt.manifestShouldChange {
                 t.Errorf(
                     "failed StaticPodControlPlane\n%s\n\texpected manifest change: %t\n\tgot: %t\n\tnewHash: %v",
                     rt.description,
                     rt.manifestShouldChange,
                     (oldHash != newHash),
                     newHash,
                 )
             }
-        return
+        })
     }
 }