Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-08 11:38:15 +00:00
Merge pull request #123629 from thockin/master
Get rid of unused API type NodeResources
This commit is contained in: commit 4164e7c3a7
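The change is mechanical for callers: anywhere a v1.NodeResources value existed only to carry its Capacity field, a v1.ResourceList is now passed directly. A minimal before/after sketch of that migration (the makeNodeCapacity helper below is hypothetical, named only for illustration; the real call sites are in the diff that follows):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// makeNodeCapacity is a hypothetical helper: before this commit it would have
// returned v1.NodeResources{Capacity: ...}; now it returns the v1.ResourceList directly.
func makeNodeCapacity(milliCPU, memoryBytes int64) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(memoryBytes, resource.BinarySI),
	}
}

func main() {
	// The list plugs straight into NodeStatus.Capacity, with no wrapper type in between.
	node := v1.Node{Status: v1.NodeStatus{Capacity: makeNodeCapacity(4000, 8<<30)}}
	fmt.Println(node.Status.Capacity.Cpu().String())
}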
@@ -36,7 +36,6 @@ API rule violation: names_match,k8s.io/api/core/v1,ISCSIPersistentVolumeSource,D
 API rule violation: names_match,k8s.io/api/core/v1,ISCSIPersistentVolumeSource,SessionCHAPAuth
 API rule violation: names_match,k8s.io/api/core/v1,ISCSIVolumeSource,DiscoveryCHAPAuth
 API rule violation: names_match,k8s.io/api/core/v1,ISCSIVolumeSource,SessionCHAPAuth
-API rule violation: names_match,k8s.io/api/core/v1,NodeResources,Capacity
 API rule violation: names_match,k8s.io/api/core/v1,NodeSpec,DoNotUseExternalID
 API rule violation: names_match,k8s.io/api/core/v1,PersistentVolumeSource,CephFS
 API rule violation: names_match,k8s.io/api/core/v1,PersistentVolumeSource,StorageOS
@@ -4978,14 +4978,6 @@ type NodeAddress struct {
     Address string
 }
 
-// NodeResources is an object for conveying resource information about a node.
-// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.
-type NodeResources struct {
-    // Capacity represents the available resources of a node
-    // +optional
-    Capacity ResourceList
-}
-
 // ResourceName is the name identifying various resources in a ResourceList.
 type ResourceName string
 
pkg/apis/core/v1/zz_generated.conversion.go (generated): 30 lines changed
@@ -1052,16 +1052,6 @@ func RegisterConversions(s *runtime.Scheme) error {
     }); err != nil {
         return err
     }
-    if err := s.AddGeneratedConversionFunc((*v1.NodeResources)(nil), (*core.NodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
-        return Convert_v1_NodeResources_To_core_NodeResources(a.(*v1.NodeResources), b.(*core.NodeResources), scope)
-    }); err != nil {
-        return err
-    }
-    if err := s.AddGeneratedConversionFunc((*core.NodeResources)(nil), (*v1.NodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
-        return Convert_core_NodeResources_To_v1_NodeResources(a.(*core.NodeResources), b.(*v1.NodeResources), scope)
-    }); err != nil {
-        return err
-    }
     if err := s.AddGeneratedConversionFunc((*v1.NodeSelector)(nil), (*core.NodeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
         return Convert_v1_NodeSelector_To_core_NodeSelector(a.(*v1.NodeSelector), b.(*core.NodeSelector), scope)
     }); err != nil {
@@ -5049,26 +5039,6 @@ func Convert_url_Values_To_v1_NodeProxyOptions(in *url.Values, out *v1.NodeProxy
     return autoConvert_url_Values_To_v1_NodeProxyOptions(in, out, s)
 }
 
-func autoConvert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error {
-    out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity))
-    return nil
-}
-
-// Convert_v1_NodeResources_To_core_NodeResources is an autogenerated conversion function.
-func Convert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error {
-    return autoConvert_v1_NodeResources_To_core_NodeResources(in, out, s)
-}
-
-func autoConvert_core_NodeResources_To_v1_NodeResources(in *core.NodeResources, out *v1.NodeResources, s conversion.Scope) error {
-    out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity))
-    return nil
-}
-
-// Convert_core_NodeResources_To_v1_NodeResources is an autogenerated conversion function.
-func Convert_core_NodeResources_To_v1_NodeResources(in *core.NodeResources, out *v1.NodeResources, s conversion.Scope) error {
-    return autoConvert_core_NodeResources_To_v1_NodeResources(in, out, s)
-}
-
 func autoConvert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error {
     out.NodeSelectorTerms = *(*[]core.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms))
     return nil
pkg/apis/core/zz_generated.deepcopy.go (generated): 23 lines changed
@@ -2727,29 +2727,6 @@ func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
     return nil
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResources) DeepCopyInto(out *NodeResources) {
-    *out = *in
-    if in.Capacity != nil {
-        in, out := &in.Capacity, &out.Capacity
-        *out = make(ResourceList, len(*in))
-        for key, val := range *in {
-            (*out)[key] = val.DeepCopy()
-        }
-    }
-    return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources.
-func (in *NodeResources) DeepCopy() *NodeResources {
-    if in == nil {
-        return nil
-    }
-    out := new(NodeResources)
-    in.DeepCopyInto(out)
-    return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NodeSelector) DeepCopyInto(out *NodeSelector) {
     *out = *in
pkg/generated/openapi/zz_generated.openapi.go (generated): 31 lines changed
@@ -460,7 +460,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
         "k8s.io/api/core/v1.NodeDaemonEndpoints": schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref),
         "k8s.io/api/core/v1.NodeList": schema_k8sio_api_core_v1_NodeList(ref),
         "k8s.io/api/core/v1.NodeProxyOptions": schema_k8sio_api_core_v1_NodeProxyOptions(ref),
-        "k8s.io/api/core/v1.NodeResources": schema_k8sio_api_core_v1_NodeResources(ref),
         "k8s.io/api/core/v1.NodeSelector": schema_k8sio_api_core_v1_NodeSelector(ref),
         "k8s.io/api/core/v1.NodeSelectorRequirement": schema_k8sio_api_core_v1_NodeSelectorRequirement(ref),
         "k8s.io/api/core/v1.NodeSelectorTerm": schema_k8sio_api_core_v1_NodeSelectorTerm(ref),
@@ -23355,36 +23354,6 @@ func schema_k8sio_api_core_v1_NodeProxyOptions(ref common.ReferenceCallback) com
     }
 }
 
-func schema_k8sio_api_core_v1_NodeResources(ref common.ReferenceCallback) common.OpenAPIDefinition {
-    return common.OpenAPIDefinition{
-        Schema: spec.Schema{
-            SchemaProps: spec.SchemaProps{
-                Description: "NodeResources is an object for conveying resource information about a node. see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.",
-                Type: []string{"object"},
-                Properties: map[string]spec.Schema{
-                    "Capacity": {
-                        SchemaProps: spec.SchemaProps{
-                            Description: "Capacity represents the available resources of a node",
-                            Type: []string{"object"},
-                            AdditionalProperties: &spec.SchemaOrBool{
-                                Allows: true,
-                                Schema: &spec.Schema{
-                                    SchemaProps: spec.SchemaProps{
-                                        Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
-                                    },
-                                },
-                            },
-                        },
-                    },
-                },
-                Required: []string{"Capacity"},
-            },
-        },
-        Dependencies: []string{
-            "k8s.io/apimachinery/pkg/api/resource.Quantity"},
-    }
-}
-
 func schema_k8sio_api_core_v1_NodeSelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
     return common.OpenAPIDefinition{
         Schema: spec.Schema{
@@ -123,16 +123,14 @@ var (
     hugePageResourceA = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
 )
 
-func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
-    return v1.NodeResources{
-        Capacity: v1.ResourceList{
-            v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-            v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
-            v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
-            extendedResourceA: *resource.NewQuantity(extendedA, resource.DecimalSI),
-            v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
-            hugePageResourceA: *resource.NewQuantity(hugePageA, resource.BinarySI),
-        },
+func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
+    return v1.ResourceList{
+        v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+        v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
+        v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
+        extendedResourceA: *resource.NewQuantity(extendedA, resource.DecimalSI),
+        v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
+        hugePageResourceA: *resource.NewQuantity(hugePageA, resource.BinarySI),
     }
 }
 
@@ -194,7 +192,7 @@ func TestGeneralPredicates(t *testing.T) {
             })),
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
-                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
+                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
             },
             name: "no resources/port/host requested always fits",
         },
@@ -210,7 +208,7 @@ func TestGeneralPredicates(t *testing.T) {
             })),
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
-                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
+                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
             },
             reasons: []PredicateFailureReason{
                 &InsufficientResourceError{ResourceName: v1.ResourceCPU, Requested: 8, Used: 5, Capacity: 10},
@@ -227,7 +225,7 @@ func TestGeneralPredicates(t *testing.T) {
             nodeInfo: schedulerframework.NewNodeInfo(),
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
-                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
+                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
             },
             reasons: []PredicateFailureReason{&PredicateFailureError{nodename.Name, nodename.ErrReason}},
             name: "host not match",
@@ -237,7 +235,7 @@ func TestGeneralPredicates(t *testing.T) {
             nodeInfo: schedulerframework.NewNodeInfo(newPodWithPort(123)),
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
-                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
+                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
             },
             reasons: []PredicateFailureReason{&PredicateFailureError{nodeports.Name, nodeports.ErrReason}},
             name: "hostport conflict",
@@ -260,7 +258,7 @@ func TestGeneralPredicates(t *testing.T) {
                         {Key: "bar", Effect: v1.TaintEffectNoExecute},
                     },
                 },
-                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
+                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
             },
             name: "taint/toleration match",
         },
@@ -274,7 +272,7 @@ func TestGeneralPredicates(t *testing.T) {
                         {Key: "foo", Effect: v1.TaintEffectNoSchedule},
                     },
                 },
-                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
+                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
             },
             name: "NoSchedule taint/toleration not match",
         },
@@ -288,7 +286,7 @@ func TestGeneralPredicates(t *testing.T) {
                         {Key: "bar", Effect: v1.TaintEffectNoExecute},
                     },
                 },
-                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
+                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
             },
             reasons: []PredicateFailureReason{&PredicateFailureError{tainttoleration.Name, tainttoleration.ErrReasonNotMatch}},
             name: "NoExecute taint/toleration not match",
@@ -303,7 +301,7 @@ func TestGeneralPredicates(t *testing.T) {
                         {Key: "baz", Effect: v1.TaintEffectPreferNoSchedule},
                     },
                 },
-                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
+                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
             },
             name: "PreferNoSchedule taint/toleration not match",
         },
@@ -324,7 +322,7 @@ func TestGeneralPredicates(t *testing.T) {
                         {Key: "bar", Effect: v1.TaintEffectNoExecute},
                     },
                 },
-                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
+                Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
             },
             name: "static pods ignore taints",
         },
@@ -37,13 +37,13 @@ type NodeRegistry struct {
 }
 
 // MakeNodeList constructs api.NodeList from list of node names and a NodeResource.
-func MakeNodeList(nodes []string, nodeResources api.NodeResources) *api.NodeList {
+func MakeNodeList(nodes []string, nodeResources api.ResourceList) *api.NodeList {
     list := api.NodeList{
         Items: make([]api.Node, len(nodes)),
     }
     for i := range nodes {
         list.Items[i].Name = nodes[i]
-        list.Items[i].Status.Capacity = nodeResources.Capacity
+        list.Items[i].Status.Capacity = nodeResources
     }
     return &list
 }
@@ -46,16 +46,14 @@ var (
     hugePageResourceA = v1.ResourceName(v1.ResourceHugePagesPrefix + "2Mi")
 )
 
-func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
-    return v1.NodeResources{
-        Capacity: v1.ResourceList{
-            v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-            v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
-            v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
-            extendedResourceA: *resource.NewQuantity(extendedA, resource.DecimalSI),
-            v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
-            hugePageResourceA: *resource.NewQuantity(hugePageA, resource.BinarySI),
-        },
+func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
+    return v1.ResourceList{
+        v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+        v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
+        v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
+        extendedResourceA: *resource.NewQuantity(extendedA, resource.DecimalSI),
+        v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
+        hugePageResourceA: *resource.NewQuantity(hugePageA, resource.BinarySI),
     }
 }
 
@@ -492,7 +490,7 @@ func TestEnoughRequests(t *testing.T) {
 
     for _, test := range enoughPodsTests {
         t.Run(test.name, func(t *testing.T) {
-            node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
+            node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5), Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
             test.nodeInfo.SetNode(&node)
 
             if test.args.ScoringStrategy == nil {
@@ -644,7 +642,7 @@ func TestStorageRequests(t *testing.T) {
             _, ctx := ktesting.NewTestContext(t)
             ctx, cancel := context.WithCancel(ctx)
             defer cancel()
-            node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
+            node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5), Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
             test.nodeInfo.SetNode(&node)
 
             p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{})
staging/src/k8s.io/api/core/v1/generated.pb.go (generated): 2461 lines changed (file diff suppressed because it is too large)
@@ -2571,13 +2571,6 @@ message NodeProxyOptions {
   optional string path = 1;
 }
 
-// NodeResources is an object for conveying resource information about a node.
-// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.
-message NodeResources {
-  // Capacity represents the available resources of a node
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
-}
-
 // A node selector represents the union of the results of one or more label queries
 // over a set of nodes; that is, it represents the OR of the selectors represented
 // by the node selector terms.
@@ -7292,13 +7292,6 @@ type Sysctl struct {
     Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
 }
 
-// NodeResources is an object for conveying resource information about a node.
-// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.
-type NodeResources struct {
-    // Capacity represents the available resources of a node
-    Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
-}
-
 const (
     // Enable stdin for remote command execution
     ExecStdinParam = "input"
@@ -1203,15 +1203,6 @@ func (NodeProxyOptions) SwaggerDoc() map[string]string {
     return map_NodeProxyOptions
 }
 
-var map_NodeResources = map[string]string{
-    "": "NodeResources is an object for conveying resource information about a node. see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.",
-    "Capacity": "Capacity represents the available resources of a node",
-}
-
-func (NodeResources) SwaggerDoc() map[string]string {
-    return map_NodeResources
-}
-
 var map_NodeSelector = map[string]string{
     "": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.",
     "nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.",
@@ -2725,29 +2725,6 @@ func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
     return nil
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResources) DeepCopyInto(out *NodeResources) {
-    *out = *in
-    if in.Capacity != nil {
-        in, out := &in.Capacity, &out.Capacity
-        *out = make(ResourceList, len(*in))
-        for key, val := range *in {
-            (*out)[key] = val.DeepCopy()
-        }
-    }
-    return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources.
-func (in *NodeResources) DeepCopy() *NodeResources {
-    if in == nil {
-        return nil
-    }
-    out := new(NodeResources)
-    in.DeepCopyInto(out)
-    return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NodeSelector) DeepCopyInto(out *NodeSelector) {
     *out = *in
@@ -80,7 +80,7 @@ type Cloud struct {
     ExtIDErr map[types.NodeName]error
     InstanceTypes map[types.NodeName]string
     Machines []types.NodeName
-    NodeResources *v1.NodeResources
+    NodeResources v1.ResourceList
     ClusterList []string
     MasterName string
     ExternalIP net.IP