Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Merge pull request #115110 from HirazawaUi/delte-pkg-unused-functions
delete unused functions in pkg directory
Commit 7b7b15b821
@@ -64,17 +64,3 @@ func PodCompleted(event watch.Event) (bool, error) {
     }
     return false, nil
 }
-
-// ServiceAccountHasSecrets returns true if the service account has at least one secret,
-// false if it does not, or an error.
-func ServiceAccountHasSecrets(event watch.Event) (bool, error) {
-    switch event.Type {
-    case watch.Deleted:
-        return false, errors.NewNotFound(schema.GroupResource{Resource: "serviceaccounts"}, "")
-    }
-    switch t := event.Object.(type) {
-    case *v1.ServiceAccount:
-        return len(t.Secrets) > 0, nil
-    }
-    return false, nil
-}
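ServiceAccountHasSecrets had the standard watch condition shape, func(watch.Event) (bool, error). If out-of-tree code still depends on the check, an equivalent condition can be carried locally; the sketch below mirrors the deleted body (the package name and unexported function name are illustrative, not part of this change).

package conditions // illustrative local package for the copied condition

import (
    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/watch"
)

// serviceAccountHasSecrets reports whether the watched ServiceAccount carries
// at least one secret; a Deleted event surfaces as a NotFound error so callers
// can stop waiting.
func serviceAccountHasSecrets(event watch.Event) (bool, error) {
    if event.Type == watch.Deleted {
        return false, apierrors.NewNotFound(schema.GroupResource{Resource: "serviceaccounts"}, "")
    }
    if sa, ok := event.Object.(*v1.ServiceAccount); ok {
        return len(sa.Secrets) > 0, nil
    }
    return false, nil
}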
@@ -223,15 +223,6 @@ func ReadURL(url string, client *http.Client, header *http.Header) (body []byte,
     return contents, nil
 }
-
-// ReadDockerConfigFileFromURL read a docker config file from the given url
-func ReadDockerConfigFileFromURL(url string, client *http.Client, header *http.Header) (cfg DockerConfig, err error) {
-    if contents, err := ReadURL(url, client, header); err == nil {
-        return ReadDockerConfigFileFromBytes(contents)
-    }
-
-    return nil, err
-}
 
 // ReadDockerConfigFileFromBytes read a docker config file from the given bytes
 func ReadDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
     if err = json.Unmarshal(contents, &cfg); err != nil {
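ReadURL and ReadDockerConfigFileFromBytes survive this change, so the removed URL variant can be recomposed at the call site if it is ever needed again. A minimal sketch, assuming it sits inside the credentialprovider package where net/http is already imported (the function name is illustrative):

// readDockerConfigFromURL recomposes the deleted helper from the two kept
// functions: fetch the raw bytes, then parse them into a DockerConfig.
func readDockerConfigFromURL(url string, client *http.Client, header *http.Header) (DockerConfig, error) {
    contents, err := ReadURL(url, client, header)
    if err != nil {
        return nil, err
    }
    return ReadDockerConfigFileFromBytes(contents)
}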
@@ -48,12 +48,6 @@ func MakeNodeList(nodes []string, nodeResources api.NodeResources) *api.NodeList
     return &list
 }
-
-func NewNodeRegistry(nodes []string, nodeResources api.NodeResources) *NodeRegistry {
-    return &NodeRegistry{
-        Nodes: *MakeNodeList(nodes, nodeResources),
-    }
-}
 
 func (r *NodeRegistry) SetError(err error) {
     r.Lock()
     defer r.Unlock()
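The removed constructor was a thin wrapper over a struct literal; MakeNodeList is kept, so tests can build the registry directly. A minimal sketch, assuming it lives in the registrytest package (the name is illustrative):

// newNodeRegistryForTest is the direct replacement for the deleted constructor:
// a NodeRegistry literal seeded from MakeNodeList.
func newNodeRegistryForTest(nodes []string, nodeResources api.NodeResources) *NodeRegistry {
    return &NodeRegistry{
        Nodes: *MakeNodeList(nodes, nodeResources),
    }
}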
@@ -28,10 +28,6 @@ import (
     api "k8s.io/kubernetes/pkg/apis/core"
 )
-
-func NewServiceRegistry() *ServiceRegistry {
-    return &ServiceRegistry{}
-}
 
 type ServiceRegistry struct {
     mu   sync.Mutex
     List api.ServiceList
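The deleted constructor only returned &ServiceRegistry{}; since the struct contains a sync.Mutex, it should keep being handled by pointer. A minimal sketch of the direct replacement, assuming the registrytest package (the name is illustrative):

// newServiceRegistryForTest replaces the deleted constructor: the zero value is
// ready to use, and returning a pointer avoids copying the contained sync.Mutex.
func newServiceRegistryForTest() *ServiceRegistry {
    return &ServiceRegistry{}
}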
@@ -52,11 +52,6 @@ func FalsePredicateExtender(pod *v1.Pod, node *v1.Node) *framework.Status {
     return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("pod is unschedulable on the node %q", node.Name))
 }
-
-// FalseAndUnresolvePredicateExtender implements fitPredicate to always return unschedulable and unresolvable status.
-func FalseAndUnresolvePredicateExtender(pod *v1.Pod, node *v1.Node) *framework.Status {
-    return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("pod is unschedulable and unresolvable on the node %q", node.Name))
-}
 
 // TruePredicateExtender implements FitPredicate function to always return success status.
 func TruePredicateExtender(pod *v1.Pod, node *v1.Node) *framework.Status {
     return framework.NewStatus(framework.Success)
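Extender tests that still want an unschedulable-and-unresolvable predicate can declare one next to the test; the sketch below mirrors the deleted function and assumes the scheduler testing package's existing v1, fmt, and framework imports (the lowercase name is illustrative):

// falseAndUnresolvePredicate mirrors the deleted extender predicate: it rejects
// the node with a status that preemption cannot resolve.
func falseAndUnresolvePredicate(pod *v1.Pod, node *v1.Node) *framework.Status {
    return framework.NewStatus(framework.UnschedulableAndUnresolvable,
        fmt.Sprintf("pod is unschedulable and unresolvable on the node %q", node.Name))
}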
@@ -60,78 +60,3 @@ func MakeNodesAndPodsForEvenPodsSpread(labels map[string]string, existingPodsNum
     }
     return
 }
-
-// MakeNodesAndPodsForPodAffinity serves as a testing helper for Pod(Anti)Affinity feature.
-// It builds a fake cluster containing running Pods and Nodes.
-// For simplicity, the Nodes will be labelled with "region", "zone" and "node". Nodes[i] will be applied with:
-// - "region": "region" + i%3
-// - "zone": "zone" + i%10
-// - "node": "node" + i
-// The Pods will be applied with various combinations of PodAffinity and PodAntiAffinity terms.
-func MakeNodesAndPodsForPodAffinity(existingPodsNum, allNodesNum int) (existingPods []*v1.Pod, allNodes []*v1.Node) {
-    tpKeyToSizeMap := map[string]int{
-        "region": 3,
-        "zone":   10,
-        "node":   allNodesNum,
-    }
-    // build nodes to spread across all topology domains
-    for i := 0; i < allNodesNum; i++ {
-        nodeName := fmt.Sprintf("node%d", i)
-        nodeWrapper := MakeNode().Name(nodeName)
-        for tpKey, size := range tpKeyToSizeMap {
-            nodeWrapper = nodeWrapper.Label(tpKey, fmt.Sprintf("%s%d", tpKey, i%size))
-        }
-        allNodes = append(allNodes, nodeWrapper.Obj())
-    }
-
-    labels := []string{"foo", "bar", "baz"}
-    tpKeys := []string{"region", "zone", "node"}
-
-    // Build pods.
-    // Each pod will be created with one affinity and one anti-affinity terms using all combinations of
-    // affinity and anti-affinity kinds listed below
-    // e.g., the first pod will have {affinity, anti-affinity} terms of kinds {NilPodAffinity, NilPodAffinity};
-    // the second will be {NilPodAffinity, PodAntiAffinityWithRequiredReq}, etc.
-    affinityKinds := []PodAffinityKind{
-        NilPodAffinity,
-        PodAffinityWithRequiredReq,
-        PodAffinityWithPreferredReq,
-        PodAffinityWithRequiredPreferredReq,
-    }
-    antiAffinityKinds := []PodAffinityKind{
-        NilPodAffinity,
-        PodAntiAffinityWithRequiredReq,
-        PodAntiAffinityWithPreferredReq,
-        PodAntiAffinityWithRequiredPreferredReq,
-    }
-
-    totalSize := len(affinityKinds) * len(antiAffinityKinds)
-    for i := 0; i < existingPodsNum; i++ {
-        podWrapper := MakePod().Name(fmt.Sprintf("pod%d", i)).Node(fmt.Sprintf("node%d", i%allNodesNum))
-        label, tpKey := labels[i%len(labels)], tpKeys[i%len(tpKeys)]
-
-        affinityIdx := i % totalSize
-        // len(affinityKinds) is equal to len(antiAffinityKinds)
-        leftIdx, rightIdx := affinityIdx/len(affinityKinds), affinityIdx%len(affinityKinds)
-        podWrapper = podWrapper.PodAffinityExists(label, tpKey, affinityKinds[leftIdx])
-        podWrapper = podWrapper.PodAntiAffinityExists(label, tpKey, antiAffinityKinds[rightIdx])
-        existingPods = append(existingPods, podWrapper.Obj())
-    }
-
-    return
-}
-
-// MakeNodesAndPods serves as a testing helper to generate regular Nodes and Pods
-// that don't use any advanced scheduling features.
-func MakeNodesAndPods(existingPodsNum, allNodesNum int) (existingPods []*v1.Pod, allNodes []*v1.Node) {
-    // build nodes
-    for i := 0; i < allNodesNum; i++ {
-        allNodes = append(allNodes, MakeNode().Name(fmt.Sprintf("node%d", i)).Obj())
-    }
-    // build pods
-    for i := 0; i < existingPodsNum; i++ {
-        podWrapper := MakePod().Name(fmt.Sprintf("pod%d", i)).Node(fmt.Sprintf("node%d", i%allNodesNum))
-        existingPods = append(existingPods, podWrapper.Obj())
-    }
-    return
-}
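The MakeNode and MakePod wrappers used by these helpers remain in the package, so a benchmark that still needs a plain fixture can rebuild the simpler MakeNodesAndPods shape in a few lines. A minimal sketch, assuming it sits in the same scheduler testing package (the name is illustrative):

// makePlainNodesAndPods rebuilds the simpler deleted helper: allNodesNum bare
// nodes plus existingPodsNum pods assigned round-robin, with no affinity terms.
func makePlainNodesAndPods(existingPodsNum, allNodesNum int) (existingPods []*v1.Pod, allNodes []*v1.Node) {
    for i := 0; i < allNodesNum; i++ {
        allNodes = append(allNodes, MakeNode().Name(fmt.Sprintf("node%d", i)).Obj())
    }
    for i := 0; i < existingPodsNum; i++ {
        pod := MakePod().Name(fmt.Sprintf("pod%d", i)).Node(fmt.Sprintf("node%d", i%allNodesNum))
        existingPods = append(existingPods, pod.Obj())
    }
    return
}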
@@ -42,21 +42,3 @@ func GetProfileName(pod *v1.Pod, containerName string) string {
 func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string {
     return annotations[v1.AppArmorBetaContainerAnnotationKeyPrefix+containerName]
 }
-
-// SetProfileName sets the name of the profile to use with the container.
-func SetProfileName(pod *v1.Pod, containerName, profileName string) error {
-    if pod.Annotations == nil {
-        pod.Annotations = map[string]string{}
-    }
-    pod.Annotations[v1.AppArmorBetaContainerAnnotationKeyPrefix+containerName] = profileName
-    return nil
-}
-
-// SetProfileNameFromPodAnnotations sets the name of the profile to use with the container.
-func SetProfileNameFromPodAnnotations(annotations map[string]string, containerName, profileName string) error {
-    if annotations == nil {
-        return nil
-    }
-    annotations[v1.AppArmorBetaContainerAnnotationKeyPrefix+containerName] = profileName
-    return nil
-}
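Both deleted setters only wrote the beta AppArmor annotation key; code that still needs to pin a profile can set the annotation directly. A minimal sketch mirroring the deleted SetProfileName body, assuming the v1 core API import (the name is illustrative):

// setAppArmorProfile writes the per-container beta AppArmor annotation,
// matching what the deleted SetProfileName helper did.
func setAppArmorProfile(pod *v1.Pod, containerName, profileName string) {
    if pod.Annotations == nil {
        pod.Annotations = map[string]string{}
    }
    pod.Annotations[v1.AppArmorBetaContainerAnnotationKeyPrefix+containerName] = profileName
}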
@@ -20,30 +20,6 @@ import (
     v1 "k8s.io/api/core/v1"
 )
-
-// HasPrivilegedRequest returns the value of SecurityContext.Privileged, taking into account
-// the possibility of nils
-func HasPrivilegedRequest(container *v1.Container) bool {
-    if container.SecurityContext == nil {
-        return false
-    }
-    if container.SecurityContext.Privileged == nil {
-        return false
-    }
-    return *container.SecurityContext.Privileged
-}
-
-// HasCapabilitiesRequest returns true if Adds or Drops are defined in the security context
-// capabilities, taking into account nils
-func HasCapabilitiesRequest(container *v1.Container) bool {
-    if container.SecurityContext == nil {
-        return false
-    }
-    if container.SecurityContext.Capabilities == nil {
-        return false
-    }
-    return len(container.SecurityContext.Capabilities.Add) > 0 || len(container.SecurityContext.Capabilities.Drop) > 0
-}
 
 // HasWindowsHostProcessRequest returns true if container should run as HostProcess container,
 // taking into account nils
 func HasWindowsHostProcessRequest(pod *v1.Pod, container *v1.Container) bool {
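The two removed checks were nil-safe accessors over v1.Container.SecurityContext; out-of-tree callers can reproduce them locally. A minimal sketch mirroring the deleted bodies (the lowercase names are illustrative):

// hasPrivilegedRequest reports SecurityContext.Privileged, treating nils as false.
func hasPrivilegedRequest(container *v1.Container) bool {
    sc := container.SecurityContext
    return sc != nil && sc.Privileged != nil && *sc.Privileged
}

// hasCapabilitiesRequest reports whether any capability Adds or Drops are set,
// treating a nil SecurityContext or Capabilities as "no request".
func hasCapabilitiesRequest(container *v1.Container) bool {
    sc := container.SecurityContext
    if sc == nil || sc.Capabilities == nil {
        return false
    }
    return len(sc.Capabilities.Add) > 0 || len(sc.Capabilities.Drop) > 0
}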