Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-09 12:07:47 +00:00

test: remove container runtime check and fix other nits

parent d72c7319f8
commit 3e6df4a871
@@ -23,20 +23,20 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/onsi/ginkgo/v2"
-	"github.com/onsi/gomega"
-	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	clientset "k8s.io/client-go/kubernetes"
 
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
 )
 
 const (
@@ -115,13 +115,7 @@ func removeExtendedResource(clientSet clientset.Interface, nodeName, extendedRes
 	}).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred())
 }
 
-func doPodResizeTests() {
-	f := framework.NewDefaultFramework("pod-resize-test")
-	var podClient *e2epod.PodClient
-	ginkgo.BeforeEach(func() {
-		podClient = e2epod.NewPodClient(f)
-	})
-
+func doPodResizeTests(f *framework.Framework) {
 	type testCase struct {
 		name string
 		containers []e2epod.ResizableContainerInfo
@@ -861,13 +855,7 @@ func doPodResizeTests()
 	for idx := range tests {
 		tc := tests[idx]
 		ginkgo.It(tc.name, func(ctx context.Context) {
-			ginkgo.By("check if in place pod vertical scaling is supported", func() {
-				node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
-				framework.ExpectNoError(err)
-				if !e2epod.IsInPlacePodVerticalScalingSupportedByRuntime(node) || framework.NodeOSDistroIs("windows") || e2enode.IsARM64(node) {
-					e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
-				}
-			})
+			podClient := e2epod.NewPodClient(f)
 			var testPod, patchedPod *v1.Pod
 			var pErr error
 
@@ -945,12 +933,7 @@ func doPodResizeTests()
 	}
 }
 
-func doPodResizeErrorTests() {
-	f := framework.NewDefaultFramework("pod-resize-errors")
-	var podClient *e2epod.PodClient
-	ginkgo.BeforeEach(func() {
-		podClient = e2epod.NewPodClient(f)
-	})
+func doPodResizeErrorTests(f *framework.Framework) {
 
 	type testCase struct {
 		name string
@@ -985,13 +968,7 @@ func doPodResizeErrorTests()
 	for idx := range tests {
 		tc := tests[idx]
 		ginkgo.It(tc.name, func(ctx context.Context) {
-			ginkgo.By("check if in place pod vertical scaling is supported", func() {
-				node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
-				framework.ExpectNoError(err)
-				if !e2epod.IsInPlacePodVerticalScalingSupportedByRuntime(node) || framework.NodeOSDistroIs("windows") || e2enode.IsARM64(node) {
-					e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
-				}
-			})
+			podClient := e2epod.NewPodClient(f)
 			var testPod, patchedPod *v1.Pod
 			var pErr error
 
@@ -1043,7 +1020,17 @@ func doPodResizeErrorTests()
 // Above tests are performed by doSheduletTests() and doPodResizeResourceQuotaTests()
 // in test/e2e/node/pod_resize.go
 
-var _ = SIGDescribe("Pod InPlace Resize Container", framework.WithSerial(), feature.InPlacePodVerticalScaling, func() {
-	doPodResizeTests()
-	doPodResizeErrorTests()
+var _ = SIGDescribe("Pod InPlace Resize Container", framework.WithSerial(), feature.InPlacePodVerticalScaling, "[NodeAlphaFeature:InPlacePodVerticalScaling]", func() {
+	f := framework.NewDefaultFramework("pod-resize-tests")
+
+	ginkgo.BeforeEach(func(ctx context.Context) {
+		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
+		framework.ExpectNoError(err)
+		if framework.NodeOSDistroIs("windows") || e2enode.IsARM64(node) {
+			e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
+		}
+	})
+
+	doPodResizeTests(f)
+	doPodResizeErrorTests(f)
 })
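The shape adopted above (and repeated in the other files of this commit) is: the Describe block, not the helper, constructs the framework with framework.NewDefaultFramework, registers a single ginkgo.BeforeEach that skips Windows and ARM64 nodes, and hands f to the helpers, which now only define specs. The containerd-version gate (IsInPlacePodVerticalScalingSupportedByRuntime) disappears entirely. A minimal sketch of that shape follows; it assumes the same e2e test package (where SIGDescribe is defined), and the suite and helper names are illustrative, not from the k8s tree.

package node // assumption: the e2e test package that defines SIGDescribe

import (
	"context"

	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// doExampleTests mirrors the new helper shape: it receives the shared
// framework instead of constructing its own, and only registers specs.
func doExampleTests(f *framework.Framework) {
	ginkgo.It("resizes a pod", func(ctx context.Context) {
		podClient := e2epod.NewPodClient(f)
		_ = podClient // create, patch and verify the pod here
	})
}

var _ = SIGDescribe("Example InPlace Resize suite", func() {
	f := framework.NewDefaultFramework("example-resize")

	// One environment skip for every spec in the suite, evaluated per test.
	ginkgo.BeforeEach(func(ctx context.Context) {
		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
		framework.ExpectNoError(err)
		if framework.NodeOSDistroIs("windows") || e2enode.IsARM64(node) {
			e2eskipper.Skipf("environment does not support InPlacePodVerticalScaling -- skipping")
		}
	})

	doExampleTests(f)
})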
@@ -20,22 +20,20 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"regexp"
 	"strconv"
 	"strings"
 
-	semver "github.com/blang/semver/v4"
-	"github.com/google/go-cmp/cmp"
-	"github.com/onsi/ginkgo/v2"
-	"github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
+	"github.com/google/go-cmp/cmp"
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 )
 
 const (
@@ -104,21 +102,6 @@ type patchSpec struct {
 	} `json:"spec"`
 }
 
-func IsInPlacePodVerticalScalingSupportedByRuntime(node *v1.Node) bool {
-	re := regexp.MustCompile("containerd://(.*)")
-	match := re.FindStringSubmatch(node.Status.NodeInfo.ContainerRuntimeVersion)
-	if len(match) != 2 {
-		return false
-	}
-	if ver, verr := semver.ParseTolerant(match[1]); verr == nil {
-		if ver.Compare(semver.MustParse(MinContainerRuntimeVersion)) < 0 {
-			return false
-		}
-		return true
-	}
-	return false
-}
-
 func getTestResourceInfo(tcInfo ResizableContainerInfo) (v1.ResourceRequirements, v1.ResourceList, []v1.ContainerResizePolicy) {
 	var res v1.ResourceRequirements
 	var alloc v1.ResourceList
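For reference, the helper deleted above gated the resize tests on the node's containerd version: it only recognized ContainerRuntimeVersion strings of the form containerd://<version>, parsed them with blang/semver, and required at least MinContainerRuntimeVersion. A standalone sketch of that same logic; the minimum version used here is illustrative, since the constant's value is not shown in this diff.

package main

import (
	"fmt"
	"regexp"

	semver "github.com/blang/semver/v4"
)

// minContainerRuntimeVersion is illustrative; the real constant lived in the
// e2e framework and its value is not reproduced in this diff.
const minContainerRuntimeVersion = "1.6.9"

// supportsInPlaceResize reproduces the deleted helper's logic: it only
// recognizes containerd, extracts the version from "containerd://<ver>",
// and requires it to be at least the minimum.
func supportsInPlaceResize(runtimeVersion string) bool {
	re := regexp.MustCompile("containerd://(.*)")
	match := re.FindStringSubmatch(runtimeVersion)
	if len(match) != 2 {
		return false
	}
	ver, err := semver.ParseTolerant(match[1])
	if err != nil {
		return false
	}
	return ver.Compare(semver.MustParse(minContainerRuntimeVersion)) >= 0
}

func main() {
	fmt.Println(supportsInPlaceResize("containerd://1.7.13")) // true
	fmt.Println(supportsInPlaceResize("cri-o://1.29.1"))      // false: non-containerd runtimes were always rejected
}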
@@ -237,6 +220,7 @@ func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []Resizab
 
 func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
 	ginkgo.GinkgoHelper()
+	gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
 	for i, wantCtr := range wantCtrs {
 		gotCtr := &gotPod.Spec.Containers[i]
 		ctr, _ := makeResizableContainer(wantCtr)
@@ -247,6 +231,7 @@ func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
 
 func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
 	ginkgo.GinkgoHelper()
+	gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
 	for i, wantCtr := range wantCtrs {
 		gotCtr := &gotPod.Spec.Containers[i]
 		ctr, _ := makeResizableContainer(wantCtr)
@@ -257,6 +242,7 @@ func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
 
 func VerifyPodAllocations(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) error {
 	ginkgo.GinkgoHelper()
+	gomega.Expect(gotPod.Status.ContainerStatuses).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
 	for i, wantCtr := range wantCtrs {
 		gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
 		if wantCtr.Allocations == nil {
@@ -280,6 +266,7 @@ func VerifyPodAllocations(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) err
 
 func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
 	ginkgo.GinkgoHelper()
+	gomega.Expect(gotPod.Status.ContainerStatuses).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
 	for i, wantCtr := range wantCtrs {
 		gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
 		ctr, _ := makeResizableContainer(wantCtr)
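Each Verify* helper above now asserts the expected container count before indexing into the pod spec or status, so a mismatch fails with a readable gomega message instead of an index-out-of-range panic inside the loop. The same guard pattern in a self-contained test; the package, test, and slice names here are hypothetical.

package resizeexample_test

import (
	"testing"

	"github.com/onsi/gomega"
)

// TestLengthGuard mirrors the guard the commit adds to the Verify* helpers:
// assert matching lengths first, then compare element by element. Without the
// HaveLen assertion, a shorter actual slice would panic inside the loop
// instead of failing with a clear assertion message.
func TestLengthGuard(t *testing.T) {
	g := gomega.NewWithT(t)

	want := []string{"app", "sidecar"}
	got := []string{"app", "sidecar"}

	g.Expect(got).To(gomega.HaveLen(len(want)), "number of containers in pod spec should match")
	for i := range want {
		g.Expect(got[i]).To(gomega.Equal(want[i]))
	}
}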
@@ -288,9 +275,10 @@ func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo)
 	}
 }
 
+// isPodOnCgroupv2Node checks whether the pod is running on cgroupv2 node.
+// TODO: Deduplicate this function with NPD cluster e2e test:
+// https://github.com/kubernetes/kubernetes/blob/2049360379bcc5d6467769cef112e6e492d3d2f0/test/e2e/node/node_problem_detector.go#L369
 func isPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool {
-	// Determine if pod is running on cgroupv2 or cgroupv1 node
-	//TODO(vinaykul,InPlacePodVerticalScaling): Is there a better way to determine this?
 	cmd := "mount -t cgroup2"
 	out, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", cmd)
 	if err != nil {
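isPodOnCgroupv2Node keys its check off running `mount -t cgroup2` inside the pod's first container; the hunk above cuts off before showing how the output is interpreted. A host-local sketch of the same probe, under the assumption that a non-empty cgroup2 mount listing means the node uses cgroup v2.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// Runs the same probe the e2e helper execs inside the pod's first container.
// "mount -t cgroup2" lists cgroup2 filesystem mounts, so any output that
// mentions "cgroup2" is taken here to mean cgroup v2 is in use (an assumed
// interpretation, since the diff does not show the helper's output handling).
func main() {
	out, err := exec.Command("/bin/sh", "-c", "mount -t cgroup2").CombinedOutput()
	if err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	fmt.Println("on cgroup v2:", strings.Contains(string(out), "cgroup2"))
}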
@@ -22,37 +22,26 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/onsi/ginkgo/v2"
-	"github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 
 	resourceapi "k8s.io/kubernetes/pkg/api/v1/resource"
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 )
 
-func doPodResizeResourceQuotaTests() {
-	f := framework.NewDefaultFramework("pod-resize-resource-quota")
-	var podClient *e2epod.PodClient
-	ginkgo.BeforeEach(func() {
-		podClient = e2epod.NewPodClient(f)
-	})
+func doPodResizeResourceQuotaTests(f *framework.Framework) {
 	timeouts := framework.NewTimeoutContext()
 
 	ginkgo.It("pod-resize-resource-quota-test", func(ctx context.Context) {
-		ginkgo.By("check if in place pod vertical scaling is supported", func() {
-			node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
-			framework.ExpectNoError(err)
-			if !e2epod.IsInPlacePodVerticalScalingSupportedByRuntime(node) || framework.NodeOSDistroIs("windows") || e2enode.IsARM64(node) {
-				e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
-			}
-		})
+		podClient := e2epod.NewPodClient(f)
 		resourceQuota := v1.ResourceQuota{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "resize-resource-quota",
@@ -166,21 +155,9 @@ func doPodResizeResourceQuotaTests()
 	})
 }
 
-func doPodResizeSchedulerTests() {
-	f := framework.NewDefaultFramework("pod-resize-scheduler")
-	var podClient *e2epod.PodClient
-	ginkgo.BeforeEach(func() {
-		podClient = e2epod.NewPodClient(f)
-	})
-
+func doPodResizeSchedulerTests(f *framework.Framework) {
 	ginkgo.It("pod-resize-scheduler-tests", func(ctx context.Context) {
-		ginkgo.By("check if in place pod vertical scaling is supported", func() {
-			node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
-			framework.ExpectNoError(err)
-			if !e2epod.IsInPlacePodVerticalScalingSupportedByRuntime(node) || framework.NodeOSDistroIs("windows") || e2enode.IsARM64(node) {
-				e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
-			}
-		})
+		podClient := e2epod.NewPodClient(f)
 		nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
 		framework.ExpectNoError(err, "failed to get running nodes")
 		gomega.Expect(nodes.Items).ShouldNot(gomega.BeEmpty())
@@ -342,9 +319,25 @@ func doPodResizeSchedulerTests()
 }
 
 var _ = SIGDescribe(framework.WithSerial(), "Pod InPlace Resize Container (scheduler-focused)", feature.InPlacePodVerticalScaling, func() {
-	doPodResizeSchedulerTests()
+	f := framework.NewDefaultFramework("pod-resize-scheduler-tests")
+	ginkgo.BeforeEach(func(ctx context.Context) {
+		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
+		framework.ExpectNoError(err)
+		if framework.NodeOSDistroIs("windows") || e2enode.IsARM64(node) {
+			e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
+		}
+	})
+	doPodResizeSchedulerTests(f)
 })
 
 var _ = SIGDescribe("Pod InPlace Resize Container", feature.InPlacePodVerticalScaling, func() {
-	doPodResizeResourceQuotaTests()
+	f := framework.NewDefaultFramework("pod-resize-tests")
+	ginkgo.BeforeEach(func(ctx context.Context) {
+		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
+		framework.ExpectNoError(err)
+		if framework.NodeOSDistroIs("windows") || e2enode.IsARM64(node) {
+			e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
+		}
+	})
+	doPodResizeResourceQuotaTests(f)
 })