Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 19:31:44 +00:00
Hack PatchNodeStatus() to override the patch type on Status.Addresses
Parent: e6ca651bed
Commit: 05a9634fb3
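
Background for the hack: NodeStatus.Addresses is annotated patchStrategy=merge with patchMergeKey=type, so strategicpatch merges the list element-wise by its "type" field. When several addresses share one type (e.g. multiple InternalIPs) or the list is reordered or shrunk, a two-way merge patch cannot express the change faithfully. Below is a minimal standalone sketch (not part of this commit; package main and the literal addresses are illustrative) that prints the patch CreateTwoWayMergePatch generates for a simple reorder, so the mismatch can be inspected:

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	// Two InternalIPs whose order flips between status updates.
	oldNode := v1.Node{Status: v1.NodeStatus{Addresses: []v1.NodeAddress{
		{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
		{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
	}}}
	newNode := v1.Node{Status: v1.NodeStatus{Addresses: []v1.NodeAddress{
		{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
		{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
	}}}

	oldData, _ := json.Marshal(oldNode)
	newData, _ := json.Marshal(newNode)

	// The merge strategy keys list elements on "type", so duplicate types
	// and reorders are not representable; print the patch to see what the
	// merge strategy actually produces for this change.
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("generated patch: %s\n", patchBytes)
}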
@@ -2166,3 +2166,156 @@ func TestNodeStatusHasChanged(t *testing.T) {
 		})
 	}
 }
+
+func TestUpdateNodeAddresses(t *testing.T) {
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+	defer testKubelet.Cleanup()
+	kubelet := testKubelet.kubelet
+	kubeClient := testKubelet.fakeKubeClient
+
+	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
+	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
+
+	tests := []struct {
+		Name   string
+		Before []v1.NodeAddress
+		After  []v1.NodeAddress
+	}{
+		{
+			Name:   "nil to populated",
+			Before: nil,
+			After: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+		},
+		{
+			Name:   "empty to populated",
+			Before: []v1.NodeAddress{},
+			After: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+		},
+		{
+			Name: "populated to nil",
+			Before: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+			After: nil,
+		},
+		{
+			Name: "populated to empty",
+			Before: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+			After: []v1.NodeAddress{},
+		},
+		{
+			Name: "multiple addresses of same type, no change",
+			Before: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
+				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+			After: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
+				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+		},
+		{
+			Name: "1 InternalIP to 2 InternalIP",
+			Before: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+			After: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+		},
+		{
+			Name: "2 InternalIP to 1 InternalIP",
+			Before: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+			After: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+		},
+		{
+			Name: "2 InternalIP to 2 different InternalIP",
+			Before: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+			After: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
+				{Type: v1.NodeInternalIP, Address: "127.0.0.4"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+		},
+		{
+			Name: "2 InternalIP to reversed order",
+			Before: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+			After: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
+				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+				{Type: v1.NodeHostName, Address: testKubeletHostname},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.Name, func(t *testing.T) {
+			oldNode := &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
+				Spec:       v1.NodeSpec{},
+				Status: v1.NodeStatus{
+					Addresses: test.Before,
+				},
+			}
+			expectedNode := &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
+				Spec:       v1.NodeSpec{},
+				Status: v1.NodeStatus{
+					Addresses: test.After,
+				},
+			}
+
+			_, err := kubeClient.CoreV1().Nodes().Update(oldNode)
+			assert.NoError(t, err)
+			kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
+				func(node *v1.Node) error {
+					node.Status.Addresses = expectedNode.Status.Addresses
+					return nil
+				},
+			}
+			assert.NoError(t, kubelet.updateNodeStatus())
+
+			actions := kubeClient.Actions()
+			lastAction := actions[len(actions)-1]
+			assert.IsType(t, core.PatchActionImpl{}, lastAction)
+			patchAction := lastAction.(core.PatchActionImpl)
+
+			updatedNode, err := applyNodeStatusPatch(oldNode, patchAction.GetPatch())
+			require.NoError(t, err)
+
+			assert.True(t, apiequality.Semantic.DeepEqual(updatedNode, expectedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
+		})
+	}
+}
|
@ -12,6 +12,7 @@ go_library(
|
|||||||
importpath = "k8s.io/kubernetes/pkg/util/node",
|
importpath = "k8s.io/kubernetes/pkg/util/node",
|
||||||
deps = [
|
deps = [
|
||||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||||
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
|
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
|
||||||
|
@ -27,6 +27,7 @@ import (
|
|||||||
"k8s.io/klog"
|
"k8s.io/klog"
|
||||||
|
|
||||||
"k8s.io/api/core/v1"
|
"k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/equality"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
||||||
@@ -195,12 +196,21 @@ func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, n
 		return nil, fmt.Errorf("failed to Marshal oldData for node %q: %v", nodeName, err)
 	}
 
+	// NodeStatus.Addresses is incorrectly annotated as patchStrategy=merge, which
+	// will cause strategicpatch.CreateTwoWayMergePatch to create an incorrect patch
+	// if it changed.
+	manuallyPatchAddresses := (len(oldNode.Status.Addresses) > 0) && !equality.Semantic.DeepEqual(oldNode.Status.Addresses, newNode.Status.Addresses)
+
 	// Reset spec to make sure only patch for Status or ObjectMeta is generated.
 	// Note that we don't reset ObjectMeta here, because:
 	// 1. This aligns with Nodes().UpdateStatus().
 	// 2. Some component does use this to update node annotations.
-	newNode.Spec = oldNode.Spec
-	newData, err := json.Marshal(newNode)
+	diffNode := newNode.DeepCopy()
+	diffNode.Spec = oldNode.Spec
+	if manuallyPatchAddresses {
+		diffNode.Status.Addresses = oldNode.Status.Addresses
+	}
+	newData, err := json.Marshal(diffNode)
 	if err != nil {
 		return nil, fmt.Errorf("failed to Marshal newData for node %q: %v", nodeName, err)
 	}
@@ -209,5 +219,63 @@ func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, n
 	if err != nil {
 		return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for node %q: %v", nodeName, err)
 	}
+	if manuallyPatchAddresses {
+		patchBytes, err = fixupPatchForNodeStatusAddresses(patchBytes, newNode.Status.Addresses)
+		if err != nil {
+			return nil, fmt.Errorf("failed to fix up NodeAddresses in patch for node %q: %v", nodeName, err)
+		}
+	}
+
 	return patchBytes, nil
 }
+
+// fixupPatchForNodeStatusAddresses adds a replace-strategy patch for Status.Addresses to
+// the existing patch
+func fixupPatchForNodeStatusAddresses(patchBytes []byte, addresses []v1.NodeAddress) ([]byte, error) {
+	// Given patchBytes='{"status": {"conditions": [ ... ], "phase": ...}}' and
+	// addresses=[{"type": "InternalIP", "address": "10.0.0.1"}], we need to generate:
+	//
+	// {
+	//   "status": {
+	//     "conditions": [ ... ],
+	//     "phase": ...,
+	//     "addresses": [
+	//       {
+	//         "type": "InternalIP",
+	//         "address": "10.0.0.1"
+	//       },
+	//       {
+	//         "$patch": "replace"
+	//       }
+	//     ]
+	//   }
+	// }
+
+	var patchMap map[string]interface{}
+	if err := json.Unmarshal(patchBytes, &patchMap); err != nil {
+		return nil, err
+	}
+
+	addrBytes, err := json.Marshal(addresses)
+	if err != nil {
+		return nil, err
+	}
+	var addrArray []interface{}
+	if err := json.Unmarshal(addrBytes, &addrArray); err != nil {
+		return nil, err
+	}
+	addrArray = append(addrArray, map[string]interface{}{"$patch": "replace"})
+
+	status := patchMap["status"]
+	if status == nil {
+		status = map[string]interface{}{}
+		patchMap["status"] = status
+	}
+	statusMap, ok := status.(map[string]interface{})
+	if !ok {
+		return nil, fmt.Errorf("unexpected data in patch")
+	}
+	statusMap["addresses"] = addrArray
+
+	return json.Marshal(patchMap)
+}
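
As a follow-up check, the "$patch": "replace" directive that fixupPatchForNodeStatusAddresses injects can be exercised directly against strategicpatch, the same library the API server uses to apply strategic merge patches. A minimal standalone sketch (not part of this commit; the JSON literals are illustrative) showing that the directive replaces the whole addresses list instead of merging it by type:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	// The node's current status, with two InternalIP addresses.
	original := []byte(`{"status":{"addresses":[
		{"type":"InternalIP","address":"127.0.0.1"},
		{"type":"InternalIP","address":"127.0.0.2"}]}}`)

	// A patch in the shape produced by fixupPatchForNodeStatusAddresses:
	// the trailing {"$patch": "replace"} entry tells the strategic merge
	// logic to replace the list wholesale rather than merge it by "type".
	patch := []byte(`{"status":{"addresses":[
		{"type":"InternalIP","address":"10.0.0.1"},
		{"$patch":"replace"}]}}`)

	result, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
	if err != nil {
		panic(err)
	}
	// The result lists only 10.0.0.1; the two old addresses are gone.
	fmt.Println(string(result))
}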