Mirror of https://github.com/k3s-io/kubernetes.git
scheduler: add pod Overhead support for requests
Signed-off-by: Eric Ernst <eric.ernst@intel.com>
This commit is contained in:
parent 30c9f097ca
commit 62e9717386
@@ -726,6 +726,9 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeI
 // the max in each dimension iteratively. In contrast, we sum the resource vectors for
 // regular containers since they run simultaneously.
 //
+// If Pod Overhead is specified and the feature gate is set, the resources defined for Overhead
+// are added to the calculated Resource request sum
+//
 // Example:
 //
 // Pod:
@@ -756,6 +759,11 @@ func GetResourceRequest(pod *v1.Pod) *schedulernodeinfo.Resource {
         result.SetMaxResource(container.Resources.Requests)
     }
 
+    // If Overhead is being utilized, add to the total requests for the pod
+    if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+        result.Add(pod.Spec.Overhead)
+    }
+
     return result
 }
 
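The change to GetResourceRequest above is the core of the commit: the per-pod request is the sum of the regular container requests, taken as a max against each init container, with pod.Spec.Overhead added on top whenever it is set and the PodOverhead feature gate is enabled. The standalone sketch below uses simplified stand-in types (not the scheduler's schedulernodeinfo.Resource) and a hypothetical requestTotal helper purely to illustrate the arithmetic; the numbers mirror the "resources + pod overhead fits" test case added further down.

package main

import "fmt"

// resources is a simplified stand-in for the scheduler's Resource struct.
type resources struct {
    MilliCPU int64
    Memory   int64
}

type container struct {
    Requests resources
}

type pod struct {
    Containers     []container
    InitContainers []container
    Overhead       *resources // nil when the pod declares no overhead
}

// requestTotal (hypothetical helper) mirrors the logic added to GetResourceRequest:
// sum the regular containers, take the max with each init container, then add the
// overhead when the feature is enabled and the pod declares one.
func requestTotal(p pod, overheadEnabled bool) resources {
    var total resources
    for _, c := range p.Containers {
        total.MilliCPU += c.Requests.MilliCPU
        total.Memory += c.Requests.Memory
    }
    for _, c := range p.InitContainers {
        if c.Requests.MilliCPU > total.MilliCPU {
            total.MilliCPU = c.Requests.MilliCPU
        }
        if c.Requests.Memory > total.Memory {
            total.Memory = c.Requests.Memory
        }
    }
    if overheadEnabled && p.Overhead != nil {
        total.MilliCPU += p.Overhead.MilliCPU
        total.Memory += p.Overhead.Memory
    }
    return total
}

func main() {
    // Same numbers as the "resources + pod overhead fits" test case:
    // one container requesting 1m CPU / 1 byte, plus 3m / 13 bytes of overhead.
    p := pod{
        Containers: []container{{Requests: resources{MilliCPU: 1, Memory: 1}}},
        Overhead:   &resources{MilliCPU: 3, Memory: 13},
    }
    fmt.Printf("scheduling as %+v\n", requestTotal(p, true)) // {MilliCPU:4 Memory:14}
}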
@@ -29,7 +29,10 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/sets"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    featuregatetesting "k8s.io/component-base/featuregate/testing"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+    "k8s.io/kubernetes/pkg/features"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
@@ -86,12 +89,20 @@ func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Po
     return pod
 }
 
+func newResourceOverheadPod(pod *v1.Pod, overhead v1.ResourceList) *v1.Pod {
+    pod.Spec.Overhead = overhead
+    return pod
+}
+
 func GetPredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
     pm := PredicateMetadataFactory{schedulertesting.FakePodLister{p}}
     return pm.GetMetadata(p, nodeInfo)
 }
 
 func TestPodFitsResources(t *testing.T) {
+
+    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
+
     enoughPodsTests := []struct {
         pod      *v1.Pod
         nodeInfo *schedulernodeinfo.NodeInfo
@@ -351,6 +362,31 @@ func TestPodFitsResources(t *testing.T) {
             ignoredExtendedResources: sets.NewString(string(extendedResourceB)),
             name: "skip checking ignored extended resource",
         },
+        {
+            pod: newResourceOverheadPod(
+                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+                v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")},
+            ),
+            nodeInfo: schedulernodeinfo.NewNodeInfo(
+                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
+            fits: true,
+            ignoredExtendedResources: sets.NewString(string(extendedResourceB)),
+            name: "resources + pod overhead fits",
+        },
+        {
+            pod: newResourceOverheadPod(
+                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+                v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
+            ),
+            nodeInfo: schedulernodeinfo.NewNodeInfo(
+                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
+            fits: false,
+            ignoredExtendedResources: sets.NewString(string(extendedResourceB)),
+            name: "requests + overhead does not fit for memory",
+            reasons: []PredicateFailureReason{
+                NewInsufficientResourceError(v1.ResourceMemory, 16, 5, 20),
+            },
+        },
     }
 
     for _, test := range enoughPodsTests {
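In the first added case the container requests (1m CPU, 1 byte of memory) plus the overhead (3m, 13 bytes) still fit next to the existing 5m/5-byte pod, so fits is true. In the second, reading the expected error's arguments as (resource, requested, used, capacity), the 1 byte requested plus 15 bytes of overhead gives 16 bytes against a node with 5 already used out of a capacity of 20, so the pod is expected to be rejected on memory.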
@@ -91,6 +91,8 @@ func (r *ResourceAllocationPriority) PriorityMap(
     }, nil
 }
 
+// getNonZeroRequests returns the total non-zero requests. If Overhead is defined for the pod and the
+// PodOverhead feature is enabled, the Overhead is added to the result.
 func getNonZeroRequests(pod *v1.Pod) *schedulernodeinfo.Resource {
     result := &schedulernodeinfo.Resource{}
     for i := range pod.Spec.Containers {
@@ -99,5 +101,17 @@ func getNonZeroRequests(pod *v1.Pod) *schedulernodeinfo.Resource {
         result.MilliCPU += cpu
         result.Memory += memory
     }
+
+    // If Overhead is being utilized, add to the total requests for the pod
+    if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+        if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
+            result.MilliCPU += pod.Spec.Overhead.Cpu().MilliValue()
+        }
+
+        if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
+            result.Memory += pod.Spec.Overhead.Memory().Value()
+        }
+    }
+
     return result
 }
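Two details in getNonZeroRequests are worth noting: the overhead is added per resource and only when that resource actually appears in pod.Spec.Overhead (a RuntimeClass that declares only CPU overhead leaves the memory total untouched), and the units follow the accessors used, Cpu().MilliValue() for millicores and Memory().Value() for bytes.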
@@ -25,8 +25,10 @@ import (
     v1 "k8s.io/api/core/v1"
     storagev1beta1 "k8s.io/api/storage/v1beta1"
     "k8s.io/apimachinery/pkg/api/resource"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/klog"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+    "k8s.io/kubernetes/pkg/features"
     priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
@@ -613,6 +615,19 @@ func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64)
         // No non-zero resources for GPUs or opaque resources.
     }
 
+    // If Overhead is being utilized, add to the total requests for the pod
+    if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+        resPtr.Add(pod.Spec.Overhead)
+
+        if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
+            non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
+        }
+
+        if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
+            non0Mem += pod.Spec.Overhead.Memory().Value()
+        }
+    }
+
     return
 }
 
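calculateResource is the path the NodeInfo tests below exercise: folding overhead into res, non0CPU, and non0Mem is what shifts their expected requestedResource and nonzeroRequest values.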
@@ -26,6 +26,9 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    featuregatetesting "k8s.io/component-base/featuregate/testing"
+    "k8s.io/kubernetes/pkg/features"
 )
 
 func TestNewResource(t *testing.T) {
@@ -540,6 +543,9 @@ func TestNodeInfoClone(t *testing.T) {
 }
 
 func TestNodeInfoAddPod(t *testing.T) {
+
+    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
+
     nodeName := "test-node"
     pods := []*v1.Pod{
         {
@@ -567,6 +573,9 @@ func TestNodeInfoAddPod(t *testing.T) {
                 },
             },
             NodeName: nodeName,
+            Overhead: v1.ResourceList{
+                v1.ResourceCPU: resource.MustParse("500m"),
+            },
         },
     },
     {
@@ -580,8 +589,7 @@ func TestNodeInfoAddPod(t *testing.T) {
             {
                 Resources: v1.ResourceRequirements{
                     Requests: v1.ResourceList{
                         v1.ResourceCPU: resource.MustParse("200m"),
-                        v1.ResourceMemory: resource.MustParse("1Ki"),
                     },
                 },
                 Ports: []v1.ContainerPort{
@@ -594,6 +602,10 @@ func TestNodeInfoAddPod(t *testing.T) {
                 },
             },
             NodeName: nodeName,
+            Overhead: v1.ResourceList{
+                v1.ResourceCPU:    resource.MustParse("500m"),
+                v1.ResourceMemory: resource.MustParse("500"),
+            },
         },
     },
 }
@@ -604,15 +616,15 @@ func TestNodeInfoAddPod(t *testing.T) {
         },
     },
     requestedResource: &Resource{
-        MilliCPU: 300,
-        Memory: 1524,
+        MilliCPU: 1300,
+        Memory: 1000,
         EphemeralStorage: 0,
         AllowedPodNumber: 0,
         ScalarResources: map[v1.ResourceName]int64(nil),
     },
     nonzeroRequest: &Resource{
-        MilliCPU: 300,
-        Memory: 1524,
+        MilliCPU: 1300,
+        Memory: 209716200, //200MB + 1000 specified in requests/overhead
         EphemeralStorage: 0,
         AllowedPodNumber: 0,
         ScalarResources: map[v1.ResourceName]int64(nil),
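The new expectations follow from the overhead added in this test. CPU grows by the two 500m overheads, from 300m to 1300m. Memory drops from 1524 to 1000 because the second pod's 1Ki request is removed and, in effect, replaced by its 500-byte overhead. For the non-zero total, the second pod now has no memory request at all, so it is counted at the 200MB default (209715200 bytes); the remaining 1000 bytes of explicit request and overhead bring the expectation to 209716200, as the inline comment notes.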
@@ -653,6 +665,9 @@ func TestNodeInfoAddPod(t *testing.T) {
                 },
             },
             NodeName: nodeName,
+            Overhead: v1.ResourceList{
+                v1.ResourceCPU: resource.MustParse("500m"),
+            },
         },
     },
     {
@@ -666,8 +681,7 @@ func TestNodeInfoAddPod(t *testing.T) {
             {
                 Resources: v1.ResourceRequirements{
                     Requests: v1.ResourceList{
                         v1.ResourceCPU: resource.MustParse("200m"),
-                        v1.ResourceMemory: resource.MustParse("1Ki"),
                     },
                 },
                 Ports: []v1.ContainerPort{
@@ -680,6 +694,10 @@ func TestNodeInfoAddPod(t *testing.T) {
                 },
             },
             NodeName: nodeName,
+            Overhead: v1.ResourceList{
+                v1.ResourceCPU:    resource.MustParse("500m"),
+                v1.ResourceMemory: resource.MustParse("500"),
+            },
         },
     },
 },
@@ -702,12 +720,23 @@ func TestNodeInfoAddPod(t *testing.T) {
 }
 
 func TestNodeInfoRemovePod(t *testing.T) {
+
+    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
+
     nodeName := "test-node"
     pods := []*v1.Pod{
         makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
         makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
     }
 
+    // add pod Overhead
+    for _, pod := range pods {
+        pod.Spec.Overhead = v1.ResourceList{
+            v1.ResourceCPU:    resource.MustParse("500m"),
+            v1.ResourceMemory: resource.MustParse("500"),
+        }
+    }
+
     tests := []struct {
         pod         *v1.Pod
         errExpected bool
@@ -723,15 +752,15 @@ func TestNodeInfoRemovePod(t *testing.T) {
         },
     },
     requestedResource: &Resource{
-        MilliCPU: 300,
-        Memory: 1524,
+        MilliCPU: 1300,
+        Memory: 2524,
         EphemeralStorage: 0,
         AllowedPodNumber: 0,
         ScalarResources: map[v1.ResourceName]int64(nil),
     },
     nonzeroRequest: &Resource{
-        MilliCPU: 300,
-        Memory: 1524,
+        MilliCPU: 1300,
+        Memory: 2524,
        EphemeralStorage: 0,
         AllowedPodNumber: 0,
         ScalarResources: map[v1.ResourceName]int64(nil),
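Here the arithmetic is driven by the loop above, which adds 500m CPU and 500 bytes of memory overhead to both base pods (100m/500 and 200m/1Ki): the totals with both pods present move from 300m/1524 to 1300m/2524, and the later case where only the 200m/1Ki pod remains expects 700m/1524, its own request plus its overhead.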
@@ -772,6 +801,10 @@ func TestNodeInfoRemovePod(t *testing.T) {
                 },
             },
             NodeName: nodeName,
+            Overhead: v1.ResourceList{
+                v1.ResourceCPU:    resource.MustParse("500m"),
+                v1.ResourceMemory: resource.MustParse("500"),
+            },
         },
     },
     {
@@ -799,6 +832,10 @@ func TestNodeInfoRemovePod(t *testing.T) {
                 },
             },
             NodeName: nodeName,
+            Overhead: v1.ResourceList{
+                v1.ResourceCPU:    resource.MustParse("500m"),
+                v1.ResourceMemory: resource.MustParse("500"),
+            },
         },
     },
 },
@@ -830,6 +867,10 @@ func TestNodeInfoRemovePod(t *testing.T) {
                 },
             },
             NodeName: nodeName,
+            Overhead: v1.ResourceList{
+                v1.ResourceCPU:    resource.MustParse("500m"),
+                v1.ResourceMemory: resource.MustParse("500"),
+            },
         },
     },
 },
 errExpected: false,
@@ -840,15 +881,15 @@ func TestNodeInfoRemovePod(t *testing.T) {
         },
     },
     requestedResource: &Resource{
-        MilliCPU: 200,
-        Memory: 1024,
+        MilliCPU: 700,
+        Memory: 1524,
         EphemeralStorage: 0,
         AllowedPodNumber: 0,
         ScalarResources: map[v1.ResourceName]int64(nil),
     },
     nonzeroRequest: &Resource{
-        MilliCPU: 200,
-        Memory: 1024,
+        MilliCPU: 700,
+        Memory: 1524,
         EphemeralStorage: 0,
         AllowedPodNumber: 0,
         ScalarResources: map[v1.ResourceName]int64(nil),
@@ -888,6 +929,10 @@ func TestNodeInfoRemovePod(t *testing.T) {
                 },
             },
             NodeName: nodeName,
+            Overhead: v1.ResourceList{
+                v1.ResourceCPU:    resource.MustParse("500m"),
+                v1.ResourceMemory: resource.MustParse("500"),
+            },
         },
     },
 },