mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-22 11:21:47 +00:00
types modifications + conversion + conversion testing
This commit is contained in:
parent
72719d5875
commit
54d42e6a65
@ -2857,6 +2857,13 @@ type PodDNSConfigOption struct {
|
||||
Value *string
|
||||
}
|
||||
|
||||
// IP address information. Each entry includes:
|
||||
// IP: An IP address allocated to the pod. Routable at least within
|
||||
// the cluster.
|
||||
type PodIP struct {
|
||||
IP string
|
||||
}
|
||||
|
||||
// PodStatus represents information about the status of a pod. Status may trail the actual
|
||||
// state of a system.
|
||||
type PodStatus struct {
|
||||
@ -2877,11 +2884,13 @@ type PodStatus struct {
|
||||
// give the resources on this node to a higher priority pod that is created after preemption.
|
||||
// +optional
|
||||
NominatedNodeName string
|
||||
|
||||
// +optional
|
||||
HostIP string
|
||||
|
||||
// PodIPs holds all of the known IP addresses allocated to the pod. Pods may be assigned AT MOST
|
||||
// one value for each of IPv4 and IPv6.
|
||||
// +optional
|
||||
PodIP string
|
||||
PodIPs []PodIP
|
||||
|
||||
// Date and time at which the object was acknowledged by the Kubelet.
|
||||
// This is before the Kubelet pulled the container image(s) for the pod.
|
||||
@ -3465,10 +3474,11 @@ type EndpointsList struct {
|
||||
|
||||
// NodeSpec describes the attributes that a node is created with.
|
||||
type NodeSpec struct {
|
||||
// PodCIDR represents the pod IP range assigned to the node
|
||||
// PodCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. It may
|
||||
// contain AT MOST one value for each of IPv4 and IPv6.
|
||||
// Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs.
|
||||
// +optional
|
||||
PodCIDR string
|
||||
PodCIDRs []string
|
||||
|
||||
// ID of the node assigned by the cloud provider
|
||||
// Note: format is "<ProviderName>://<ProviderSpecificNodeID>"
|
||||
|
@ -35,6 +35,10 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
|
||||
err := scheme.AddConversionFuncs(
|
||||
Convert_core_Pod_To_v1_Pod,
|
||||
Convert_core_PodSpec_To_v1_PodSpec,
|
||||
Convert_v1_PodStatus_To_core_PodStatus,
|
||||
Convert_core_PodStatus_To_v1_PodStatus,
|
||||
Convert_core_NodeSpec_To_v1_NodeSpec,
|
||||
Convert_v1_NodeSpec_To_core_NodeSpec,
|
||||
Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec,
|
||||
Convert_core_ServiceSpec_To_v1_ServiceSpec,
|
||||
Convert_v1_Pod_To_core_Pod,
|
||||
@ -270,6 +274,40 @@ func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec,
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error {
|
||||
if err := autoConvert_v1_PodStatus_To_core_PodStatus(in, out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If both fields (v1.PodIPs and v1.PodIP) are provided, then test v1.PodIP == v1.PodIPs[0]
|
||||
if (len(in.PodIP) > 0 && len(in.PodIPs) > 0) && (in.PodIP != in.PodIPs[0].IP) {
|
||||
return fmt.Errorf("conversion Error: v1.PodIP(%v) != v1.PodIPs[0](%v)", in.PodIP, in.PodIPs[0].IP)
|
||||
}
|
||||
// at the this point, autoConvert copied v1.PodIPs -> core.PodIPs
|
||||
// if v1.PodIPs was empty but v1.PodIP is not, then set core.PodIPs[0] with v1.PodIP
|
||||
if len(in.PodIP) > 0 && len(in.PodIPs) == 0 {
|
||||
out.PodIPs = []core.PodIP{
|
||||
{
|
||||
IP: in.PodIP,
|
||||
},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error {
|
||||
if err := autoConvert_core_PodStatus_To_v1_PodStatus(in, out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
// at the this point autoConvert copied core.PodIPs -> v1.PodIPs
|
||||
// v1.PodIP (singular value field, which does not exist in core) needs to
|
||||
// be set with core.PodIPs[0]
|
||||
if len(in.PodIPs) > 0 {
|
||||
out.PodIP = in.PodIPs[0].IP
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The following two v1.PodSpec conversions are done here to support v1.ServiceAccount
|
||||
// as an alias for ServiceAccountName.
|
||||
func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error {
|
||||
@ -292,6 +330,36 @@ func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s con
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error {
|
||||
if err := autoConvert_core_NodeSpec_To_v1_NodeSpec(in, out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
// at the this point autoConvert copied core.PodCIDRs -> v1.PodCIDRs
|
||||
// v1.PodCIDR (singular value field, which does not exist in core) needs to
|
||||
// be set with core.PodCIDRs[0]
|
||||
if len(in.PodCIDRs) > 0 {
|
||||
out.PodCIDR = in.PodCIDRs[0]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error {
|
||||
if err := autoConvert_v1_NodeSpec_To_core_NodeSpec(in, out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
// If both fields (v1.PodCIDRs and v1.PodCIDR) are provided, then test v1.PodCIDR == v1.PodCIDRs[0]
|
||||
if (len(in.PodCIDR) > 0 && len(in.PodCIDRs) > 0) && (in.PodCIDR != in.PodCIDRs[0]) {
|
||||
return fmt.Errorf("conversion Error: v1.PodCIDR(%v) != v1.CIDRs[0](%v)", in.PodCIDR, in.PodCIDRs[0])
|
||||
}
|
||||
|
||||
// at the this point, autoConvert copied v1.PodCIDRs -> core.PodCIDRs
|
||||
// if v1.PodCIDRs was empty but v1.PodCIDR is not, then set core.PodCIDRs[0] with v1.PodCIDR
|
||||
if len(in.PodCIDR) > 0 && len(in.PodCIDRs) == 0 {
|
||||
out.PodCIDRs = []string{in.PodCIDR}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error {
|
||||
if err := autoConvert_v1_PodSpec_To_core_PodSpec(in, out, s); err != nil {
|
||||
return err
|
||||
|
@ -346,3 +346,279 @@ func roundTripRS(t *testing.T, rs *apps.ReplicaSet) *apps.ReplicaSet {
|
||||
}
|
||||
return obj3
|
||||
}
|
||||
|
||||
func Test_core_PodStatus_to_v1_PodStatus(t *testing.T) {
|
||||
// core to v1
|
||||
testInputs := []core.PodStatus{
|
||||
{
|
||||
// one IP
|
||||
PodIPs: []core.PodIP{
|
||||
{
|
||||
IP: "1.1.1.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
// no ips
|
||||
PodIPs: nil,
|
||||
},
|
||||
{
|
||||
// list of ips
|
||||
PodIPs: []core.PodIP{
|
||||
{
|
||||
IP: "1.1.1.1",
|
||||
},
|
||||
{
|
||||
IP: "2000::",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for i, input := range testInputs {
|
||||
v1PodStatus := v1.PodStatus{}
|
||||
if err := corev1.Convert_core_PodStatus_To_v1_PodStatus(&input, &v1PodStatus, nil); nil != err {
|
||||
t.Errorf("%v: Convert core.PodStatus to v1.PodStatus failed with error %v", i, err.Error())
|
||||
}
|
||||
|
||||
if len(input.PodIPs) == 0 {
|
||||
// no more work needed
|
||||
continue
|
||||
}
|
||||
// Primary IP was not set..
|
||||
if len(v1PodStatus.PodIP) == 0 {
|
||||
t.Errorf("%v: Convert core.PodStatus to v1.PodStatus failed out.PodIP is empty, should be %v", i, v1PodStatus.PodIP)
|
||||
}
|
||||
|
||||
// Primary should always == in.PodIPs[0].IP
|
||||
if len(input.PodIPs) > 0 && v1PodStatus.PodIP != input.PodIPs[0].IP {
|
||||
t.Errorf("%v: Convert core.PodStatus to v1.PodStatus failed out.PodIP != in.PodIP[0].IP expected %v found %v", i, input.PodIPs[0].IP, v1PodStatus.PodIP)
|
||||
}
|
||||
// match v1.PodIPs to core.PodIPs
|
||||
for idx := range input.PodIPs {
|
||||
if v1PodStatus.PodIPs[idx].IP != input.PodIPs[idx].IP {
|
||||
t.Errorf("%v: Convert core.PodStatus to v1.PodStatus failed. Expected v1.PodStatus[%v]=%v but found %v", i, idx, input.PodIPs[idx].IP, v1PodStatus.PodIPs[idx].IP)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func Test_v1_PodStatus_to_core_PodStatus(t *testing.T) {
|
||||
// fail
|
||||
v1FailTestInputs := []v1.PodStatus{
|
||||
{
|
||||
PodIP: "1.1.2.1", // fail becaue PodIP != PodIPs[0]
|
||||
PodIPs: []v1.PodIP{
|
||||
{IP: "1.1.1.1"},
|
||||
{IP: "2.2.2.2"},
|
||||
},
|
||||
},
|
||||
}
|
||||
// success
|
||||
v1TestInputs := []v1.PodStatus{
|
||||
// only Primary IP Provided
|
||||
{
|
||||
PodIP: "1.1.1.1",
|
||||
},
|
||||
{
|
||||
// both are not provided
|
||||
PodIP: "",
|
||||
PodIPs: nil,
|
||||
},
|
||||
// only list of IPs
|
||||
{
|
||||
PodIPs: []v1.PodIP{
|
||||
{IP: "1.1.1.1"},
|
||||
{IP: "2.2.2.2"},
|
||||
},
|
||||
},
|
||||
// Both
|
||||
{
|
||||
PodIP: "1.1.1.1",
|
||||
PodIPs: []v1.PodIP{
|
||||
{IP: "1.1.1.1"},
|
||||
{IP: "2.2.2.2"},
|
||||
},
|
||||
},
|
||||
// v4 and v6
|
||||
{
|
||||
PodIP: "1.1.1.1",
|
||||
PodIPs: []v1.PodIP{
|
||||
{IP: "1.1.1.1"},
|
||||
{IP: "::1"},
|
||||
},
|
||||
},
|
||||
// v6 and v4
|
||||
{
|
||||
PodIP: "::1",
|
||||
PodIPs: []v1.PodIP{
|
||||
{IP: "::1"},
|
||||
{IP: "1.1.1.1"},
|
||||
},
|
||||
},
|
||||
}
|
||||
// run failed cases
|
||||
for i, failedTest := range v1FailTestInputs {
|
||||
corePodStatus := core.PodStatus{}
|
||||
// convert..
|
||||
if err := corev1.Convert_v1_PodStatus_To_core_PodStatus(&failedTest, &corePodStatus, nil); err == nil {
|
||||
t.Errorf("%v: Convert v1.PodStatus to core.PodStatus should have failed for input %+v", i, failedTest)
|
||||
}
|
||||
}
|
||||
|
||||
// run ok cases
|
||||
for i, testInput := range v1TestInputs {
|
||||
corePodStatus := core.PodStatus{}
|
||||
// convert..
|
||||
if err := corev1.Convert_v1_PodStatus_To_core_PodStatus(&testInput, &corePodStatus, nil); err != nil {
|
||||
t.Errorf("%v: Convert v1.PodStatus to core.PodStatus failed with error:%v for input %+v", i, err.Error(), testInput)
|
||||
}
|
||||
|
||||
if len(testInput.PodIP) == 0 && len(testInput.PodIPs) == 0 {
|
||||
continue //no more work needed
|
||||
}
|
||||
|
||||
// List should have at least 1 IP == v1.PodIP || v1.PodIPs[0] (whichever provided)
|
||||
if len(testInput.PodIP) > 0 && corePodStatus.PodIPs[0].IP != testInput.PodIP {
|
||||
t.Errorf("%v: Convert v1.PodStatus to core.PodStatus failed. expected corePodStatus.PodIPs[0].ip=%v found %v", i, corePodStatus.PodIPs[0].IP, corePodStatus.PodIPs[0].IP)
|
||||
}
|
||||
|
||||
// walk the list
|
||||
for idx := range testInput.PodIPs {
|
||||
if corePodStatus.PodIPs[idx].IP != testInput.PodIPs[idx].IP {
|
||||
t.Errorf("%v: Convert v1.PodStatus to core.PodStatus failed core.PodIPs[%v]=%v expected %v", i, idx, corePodStatus.PodIPs[idx].IP, testInput.PodIPs[idx].IP)
|
||||
}
|
||||
}
|
||||
|
||||
// if input has a list of IPs
|
||||
// then out put should have the same length
|
||||
if len(testInput.PodIPs) > 0 && len(testInput.PodIPs) != len(corePodStatus.PodIPs) {
|
||||
t.Errorf("%v: Convert v1.PodStatus to core.PodStatus failed len(core.PodIPs) != len(v1.PodStatus.PodIPs) [%v]=[%v]", i, len(corePodStatus.PodIPs), len(testInput.PodIPs))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_core_NodeSpec_to_v1_NodeSpec(t *testing.T) {
|
||||
// core to v1
|
||||
testInputs := []core.NodeSpec{
|
||||
{
|
||||
PodCIDRs: []string{"10.0.0.0/24", "10.0.1.0/24"},
|
||||
},
|
||||
{
|
||||
PodCIDRs: nil,
|
||||
},
|
||||
{
|
||||
PodCIDRs: []string{"10.0.0.0/24"},
|
||||
},
|
||||
{
|
||||
PodCIDRs: []string{"ace:cab:deca::/8"},
|
||||
},
|
||||
{
|
||||
PodCIDRs: []string{"10.0.0.0/24", "ace:cab:deca::/8"},
|
||||
},
|
||||
{
|
||||
PodCIDRs: []string{"ace:cab:deca::/8", "10.0.0.0/24"},
|
||||
},
|
||||
}
|
||||
|
||||
for i, testInput := range testInputs {
|
||||
v1NodeSpec := v1.NodeSpec{}
|
||||
// convert
|
||||
if err := corev1.Convert_core_NodeSpec_To_v1_NodeSpec(&testInput, &v1NodeSpec, nil); nil != err {
|
||||
t.Errorf("%v: Convert core.NodeSpec to v1.NodeSpec failed with error %v", i, err.Error())
|
||||
}
|
||||
|
||||
if len(testInput.PodCIDRs) == 0 {
|
||||
continue // no more work needed
|
||||
}
|
||||
|
||||
// validate results
|
||||
if v1NodeSpec.PodCIDR != testInput.PodCIDRs[0] {
|
||||
t.Errorf("%v: Convert core.NodeSpec to v1.NodeSpec failed. Expected v1.PodCIDR=%v but found %v", i, testInput.PodCIDRs[0], v1NodeSpec.PodCIDR)
|
||||
}
|
||||
|
||||
// match v1.PodIPs to core.PodIPs
|
||||
for idx := range testInput.PodCIDRs {
|
||||
if v1NodeSpec.PodCIDRs[idx] != testInput.PodCIDRs[idx] {
|
||||
t.Errorf("%v: Convert core.NodeSpec to v1.NodeSpec failed. Expected v1.NodeSpec[%v]=%v but found %v", i, idx, testInput.PodCIDRs[idx], v1NodeSpec.PodCIDRs[idx])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_v1_NodeSpec_to_core_NodeSpec(t *testing.T) {
|
||||
failInputs := []v1.NodeSpec{
|
||||
{ // fail PodCIDRs[0] != PodCIDR
|
||||
PodCIDR: "10.0.0.0/24",
|
||||
PodCIDRs: []string{"10.0.1.0/24", "ace:cab:deca::/8"},
|
||||
},
|
||||
}
|
||||
|
||||
testInputs := []v1.NodeSpec{
|
||||
// cidr only - 4
|
||||
{
|
||||
PodCIDR: "10.0.1.0/24",
|
||||
},
|
||||
// cidr only - 6
|
||||
{
|
||||
PodCIDR: "ace:cab:deca::/8",
|
||||
},
|
||||
// Both are provided
|
||||
{
|
||||
PodCIDR: "10.0.1.0/24",
|
||||
PodCIDRs: []string{"10.0.1.0/24", "ace:cab:deca::/8"},
|
||||
},
|
||||
// list only
|
||||
{
|
||||
PodCIDRs: []string{"10.0.1.0/24", "ace:cab:deca::/8"},
|
||||
},
|
||||
// Both are provided 4,6
|
||||
{
|
||||
PodCIDR: "10.0.1.0/24",
|
||||
PodCIDRs: []string{"10.0.1.0/24", "ace:cab:deca::/8"},
|
||||
},
|
||||
// Both are provided 6,4
|
||||
{
|
||||
PodCIDR: "ace:cab:deca::/8",
|
||||
PodCIDRs: []string{"ace:cab:deca::/8", "10.0.1.0/24"},
|
||||
},
|
||||
// list only 4,6
|
||||
{
|
||||
PodCIDRs: []string{"10.0.1.0/24", "ace:cab:deca::/8"},
|
||||
},
|
||||
// list only 6,4
|
||||
{
|
||||
PodCIDRs: []string{"ace:cab:deca::/8", "10.0.1.0/24"},
|
||||
},
|
||||
// no cidr and no cidrs
|
||||
{
|
||||
PodCIDR: "",
|
||||
PodCIDRs: nil,
|
||||
},
|
||||
}
|
||||
|
||||
// fail cases
|
||||
for i, failInput := range failInputs {
|
||||
coreNodeSpec := core.NodeSpec{}
|
||||
if err := corev1.Convert_v1_NodeSpec_To_core_NodeSpec(&failInput, &coreNodeSpec, nil); err == nil {
|
||||
t.Errorf("%v: Convert v1.NodeSpec to core.NodeSpec failed. Expected an error when coreNodeSpec.PodCIDR != coreNodeSpec.PodCIDRs[0]", i)
|
||||
}
|
||||
}
|
||||
|
||||
for i, testInput := range testInputs {
|
||||
coreNodeSpec := core.NodeSpec{}
|
||||
if err := corev1.Convert_v1_NodeSpec_To_core_NodeSpec(&testInput, &coreNodeSpec, nil); err != nil {
|
||||
t.Errorf("%v:Convert v1.NodeSpec to core.NodeSpec failed with error:%v", i, err.Error())
|
||||
}
|
||||
if len(testInput.PodCIDRs) == 0 && len(testInput.PodCIDR) == 0 {
|
||||
continue // no more work needed
|
||||
}
|
||||
if len(testInput.PodCIDR) > 0 && coreNodeSpec.PodCIDRs[0] != testInput.PodCIDR {
|
||||
t.Errorf("%v:Convert v1.NodeSpec to core.NodeSpec failed. expected coreNodeSpec.PodCIDRs[0]=%v found %v", i, testInput.PodCIDR, coreNodeSpec.PodCIDRs[0])
|
||||
}
|
||||
// match ip list
|
||||
for idx := range testInput.PodCIDRs {
|
||||
if coreNodeSpec.PodCIDRs[idx] != testInput.PodCIDRs[idx] {
|
||||
t.Errorf("%v:Convert v1.NodeSpec to core.NodeSpec failed core.PodCIDRs[%v]=%v expected %v", i, idx, coreNodeSpec.PodCIDRs[idx], testInput.PodCIDRs[idx])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -54,6 +54,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/fieldpath"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
const isNegativeErrorMsg string = apimachineryvalidation.IsNegativeErrorMsg
|
||||
@ -3030,6 +3031,44 @@ func ValidatePod(pod *core.Pod) field.ErrorList {
|
||||
allErrs = append(allErrs, field.Invalid(specPath, hugePageResources, "must use a single hugepage size in a pod spec"))
|
||||
}
|
||||
|
||||
podIPsField := field.NewPath("status", "podIPs")
|
||||
|
||||
// all PodIPs must be valid IPs
|
||||
for i, podIP := range pod.Status.PodIPs {
|
||||
for _, msg := range validation.IsValidIP(podIP.IP) {
|
||||
allErrs = append(allErrs, field.Invalid(podIPsField.Index(i), podIP.IP, msg))
|
||||
}
|
||||
}
|
||||
|
||||
// if we have more than one Pod.PodIP then
|
||||
// - validate for dual stack
|
||||
// - validate for duplication
|
||||
if len(pod.Status.PodIPs) > 1 {
|
||||
podIPs := make([]string, 0, len(pod.Status.PodIPs))
|
||||
for _, podIP := range pod.Status.PodIPs {
|
||||
podIPs = append(podIPs, podIP.IP)
|
||||
}
|
||||
|
||||
dualStack, err := netutils.IsDualStackIPStrings(podIPs)
|
||||
if err != nil {
|
||||
allErrs = append(allErrs, field.InternalError(podIPsField, fmt.Errorf("failed to check for dual stack with error:%v", err)))
|
||||
}
|
||||
|
||||
// We only support one from each IP family (i.e. max two IPs in this list).
|
||||
if !dualStack || len(podIPs) > 2 {
|
||||
allErrs = append(allErrs, field.Invalid(podIPsField, pod.Status.PodIPs, "may specify no more than one IP for each IP family"))
|
||||
}
|
||||
|
||||
// There should be no duplicates in list of Pod.PodIPs
|
||||
seen := sets.String{} //:= make(map[string]int)
|
||||
for i, podIP := range pod.Status.PodIPs {
|
||||
if seen.Has(podIP.IP) {
|
||||
allErrs = append(allErrs, field.Duplicate(podIPsField.Index(i), podIP))
|
||||
}
|
||||
seen.Insert(podIP.IP)
|
||||
}
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
@ -4205,12 +4244,40 @@ func ValidateNode(node *core.Node) field.ErrorList {
|
||||
// That said, if specified, we need to ensure they are valid.
|
||||
allErrs = append(allErrs, ValidateNodeResources(node)...)
|
||||
|
||||
if len(node.Spec.PodCIDR) != 0 {
|
||||
_, err := ValidateCIDR(node.Spec.PodCIDR)
|
||||
if err != nil {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "podCIDR"), node.Spec.PodCIDR, "not a valid CIDR"))
|
||||
// validate PodCIDRS only if we need to
|
||||
if len(node.Spec.PodCIDRs) > 0 {
|
||||
podCIDRsField := field.NewPath("spec", "podCIDRs")
|
||||
|
||||
// all PodCIDRs should be valid ones
|
||||
for idx, value := range node.Spec.PodCIDRs {
|
||||
if _, err := ValidateCIDR(value); err != nil {
|
||||
allErrs = append(allErrs, field.Invalid(podCIDRsField.Index(idx), node.Spec.PodCIDRs, "must be valid CIDR"))
|
||||
}
|
||||
}
|
||||
|
||||
// if more than PodCIDR then
|
||||
// - validate for dual stack
|
||||
// - validate for duplication
|
||||
if len(node.Spec.PodCIDRs) > 1 {
|
||||
dualStack, err := netutils.IsDualStackCIDRStrings(node.Spec.PodCIDRs)
|
||||
if err != nil {
|
||||
allErrs = append(allErrs, field.InternalError(podCIDRsField, fmt.Errorf("invalid PodCIDRs. failed to check with dual stack with error:%v", err)))
|
||||
}
|
||||
if !dualStack || len(node.Spec.PodCIDRs) > 2 {
|
||||
allErrs = append(allErrs, field.Invalid(podCIDRsField, node.Spec.PodCIDRs, "may specify no more than one CIDR for each IP family"))
|
||||
}
|
||||
|
||||
// PodCIDRs must not contain duplicates
|
||||
seen := sets.String{}
|
||||
for i, value := range node.Spec.PodCIDRs {
|
||||
if seen.Has(value) {
|
||||
allErrs = append(allErrs, field.Duplicate(podCIDRsField.Index(i), value))
|
||||
}
|
||||
seen.Insert(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
@ -4269,12 +4336,20 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
|
||||
addresses[address] = true
|
||||
}
|
||||
|
||||
if len(oldNode.Spec.PodCIDR) == 0 {
|
||||
if len(oldNode.Spec.PodCIDRs) == 0 {
|
||||
// Allow the controller manager to assign a CIDR to a node if it doesn't have one.
|
||||
oldNode.Spec.PodCIDR = node.Spec.PodCIDR
|
||||
//this is a no op for a string slice.
|
||||
oldNode.Spec.PodCIDRs = node.Spec.PodCIDRs
|
||||
} else {
|
||||
if oldNode.Spec.PodCIDR != node.Spec.PodCIDR {
|
||||
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDR"), "node updates may not change podCIDR except from \"\" to valid"))
|
||||
// compare the entire slice
|
||||
if len(oldNode.Spec.PodCIDRs) != len(node.Spec.PodCIDRs) {
|
||||
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDRs"), "node updates may not change podCIDR except from \"\" to valid"))
|
||||
} else {
|
||||
for idx, value := range oldNode.Spec.PodCIDRs {
|
||||
if value != node.Spec.PodCIDRs[idx] {
|
||||
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDRs"), "node updates may not change podCIDR except from \"\" to valid"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -10326,7 +10326,7 @@ func TestValidateNode(t *testing.T) {
|
||||
},
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDR: "192.168.0.0/16",
|
||||
PodCIDRs: []string{"192.168.0.0/16"},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -10533,7 +10533,24 @@ func TestValidateNode(t *testing.T) {
|
||||
},
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDR: "192.168.0.0",
|
||||
PodCIDRs: []string{"192.168.0.0"},
|
||||
},
|
||||
},
|
||||
"duplicate-pod-cidr": {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "abc",
|
||||
},
|
||||
Status: core.NodeStatus{
|
||||
Addresses: []core.NodeAddress{
|
||||
{Type: core.NodeExternalIP, Address: "something"},
|
||||
},
|
||||
Capacity: core.ResourceList{
|
||||
core.ResourceName(core.ResourceCPU): resource.MustParse("10"),
|
||||
core.ResourceName(core.ResourceMemory): resource.MustParse("0"),
|
||||
},
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDRs: []string{"10.0.0.1/16", "10.0.0.1/16"},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -10616,14 +10633,14 @@ func TestValidateNodeUpdate(t *testing.T) {
|
||||
Name: "foo",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDR: "",
|
||||
PodCIDRs: []string{},
|
||||
},
|
||||
}, core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDR: "192.168.0.0/16",
|
||||
PodCIDRs: []string{"192.168.0.0/16"},
|
||||
},
|
||||
}, true},
|
||||
{core.Node{
|
||||
@ -10631,14 +10648,14 @@ func TestValidateNodeUpdate(t *testing.T) {
|
||||
Name: "foo",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDR: "192.123.0.0/16",
|
||||
PodCIDRs: []string{"192.123.0.0/16"},
|
||||
},
|
||||
}, core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDR: "192.168.0.0/16",
|
||||
PodCIDRs: []string{"192.168.0.0/16"},
|
||||
},
|
||||
}, false},
|
||||
{core.Node{
|
||||
@ -10925,6 +10942,66 @@ func TestValidateNodeUpdate(t *testing.T) {
|
||||
ProviderID: "provider:///new",
|
||||
},
|
||||
}, false},
|
||||
{core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-cidrs-as-is",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDRs: []string{"192.168.0.0/16"},
|
||||
},
|
||||
}, core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-cidrs-as-is",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDRs: []string{"192.168.0.0/16"},
|
||||
},
|
||||
}, true},
|
||||
{core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-cidrs-as-is-2",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDRs: []string{"192.168.0.0/16", "2000::/10"},
|
||||
},
|
||||
}, core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-cidrs-as-is-2",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDRs: []string{"192.168.0.0/16", "2000::/10"},
|
||||
},
|
||||
}, true},
|
||||
{core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-cidrs-not-same-length",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDRs: []string{"192.168.0.0/16", "192.167.0.0/16", "2000::/10"},
|
||||
},
|
||||
}, core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-cidrs-not-same-length",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDRs: []string{"192.168.0.0/16", "2000::/10"},
|
||||
},
|
||||
}, false},
|
||||
{core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-cidrs-not-same",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDRs: []string{"192.168.0.0/16", "2000::/10"},
|
||||
},
|
||||
}, core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-cidrs-not-same",
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDRs: []string{"2000::/10", "192.168.0.0/16"},
|
||||
},
|
||||
}, false},
|
||||
}
|
||||
for i, test := range tests {
|
||||
test.oldNode.ObjectMeta.ResourceVersion = "1"
|
||||
@ -13542,3 +13619,188 @@ func TestValidateOverhead(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// helper creates a pod with name, namespace and IPs
|
||||
func makePod(podName string, podNamespace string, podIPs []core.PodIP) core.Pod {
|
||||
return core.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace},
|
||||
Spec: core.PodSpec{
|
||||
Containers: []core.Container{
|
||||
{
|
||||
Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File",
|
||||
},
|
||||
},
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
Status: core.PodStatus{
|
||||
PodIPs: podIPs,
|
||||
},
|
||||
}
|
||||
}
|
||||
func TestPodIPsValidation(t *testing.T) {
|
||||
testCases := []struct {
|
||||
pod core.Pod
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
expectError: false,
|
||||
pod: makePod("nil-ips", "ns", nil),
|
||||
},
|
||||
{
|
||||
expectError: false,
|
||||
pod: makePod("empty-podips-list", "ns", []core.PodIP{}),
|
||||
},
|
||||
{
|
||||
expectError: false,
|
||||
pod: makePod("single-ip-family-6", "ns", []core.PodIP{{IP: "::1"}}),
|
||||
},
|
||||
{
|
||||
expectError: false,
|
||||
pod: makePod("single-ip-family-4", "ns", []core.PodIP{{IP: "1.1.1.1"}}),
|
||||
},
|
||||
{
|
||||
expectError: false,
|
||||
pod: makePod("dual-stack-4-6", "ns", []core.PodIP{{IP: "1.1.1.1"}, {IP: "::1"}}),
|
||||
},
|
||||
{
|
||||
expectError: false,
|
||||
pod: makePod("dual-stack-6-4", "ns", []core.PodIP{{IP: "::1"}, {IP: "1.1.1.1"}}),
|
||||
},
|
||||
/* failure cases start here */
|
||||
{
|
||||
expectError: true,
|
||||
pod: makePod("invalid-pod-ip", "ns", []core.PodIP{{IP: "this-is-not-an-ip"}}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
pod: makePod("dualstack-same-ip-family-6", "ns", []core.PodIP{{IP: "::1"}, {IP: "::2"}}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
pod: makePod("dualstack-same-ip-family-4", "ns", []core.PodIP{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
pod: makePod("dualstack-repeated-ip-family-6", "ns", []core.PodIP{{IP: "1.1.1.1"}, {IP: "::1"}, {IP: "::2"}}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
pod: makePod("dualstack-repeated-ip-family-4", "ns", []core.PodIP{{IP: "1.1.1.1"}, {IP: "::1"}, {IP: "2.2.2.2"}}),
|
||||
},
|
||||
|
||||
{
|
||||
expectError: true,
|
||||
pod: makePod("dualstack-duplicate-ip-family-4", "ns", []core.PodIP{{IP: "1.1.1.1"}, {IP: "1.1.1.1"}, {IP: "::1"}}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
pod: makePod("dualstack-duplicate-ip-family-6", "ns", []core.PodIP{{IP: "1.1.1.1"}, {IP: "::1"}, {IP: "::1"}}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
errs := ValidatePod(&testCase.pod)
|
||||
if len(errs) == 0 && testCase.expectError {
|
||||
t.Errorf("expected failure for %s, but there were none", testCase.pod.Name)
|
||||
return
|
||||
}
|
||||
if len(errs) != 0 && !testCase.expectError {
|
||||
t.Errorf("expected success for %s, but there were errors: %v", testCase.pod.Name, errs)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// makes a node with pod cidr and a name
|
||||
func makeNode(nodeName string, podCIDRs []string) core.Node {
|
||||
return core.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeName,
|
||||
},
|
||||
Status: core.NodeStatus{
|
||||
Addresses: []core.NodeAddress{
|
||||
{Type: core.NodeExternalIP, Address: "something"},
|
||||
},
|
||||
Capacity: core.ResourceList{
|
||||
core.ResourceName(core.ResourceCPU): resource.MustParse("10"),
|
||||
core.ResourceName(core.ResourceMemory): resource.MustParse("0"),
|
||||
},
|
||||
},
|
||||
Spec: core.NodeSpec{
|
||||
PodCIDRs: podCIDRs,
|
||||
},
|
||||
}
|
||||
}
|
||||
func TestValidateNodeCIDRs(t *testing.T) {
|
||||
testCases := []struct {
|
||||
expectError bool
|
||||
node core.Node
|
||||
}{
|
||||
{
|
||||
expectError: false,
|
||||
node: makeNode("nil-pod-cidr", nil),
|
||||
},
|
||||
{
|
||||
expectError: false,
|
||||
node: makeNode("empty-pod-cidr", []string{}),
|
||||
},
|
||||
{
|
||||
expectError: false,
|
||||
node: makeNode("single-pod-cidr-4", []string{"192.168.0.0/16"}),
|
||||
},
|
||||
{
|
||||
expectError: false,
|
||||
node: makeNode("single-pod-cidr-6", []string{"2000::/10"}),
|
||||
},
|
||||
|
||||
{
|
||||
expectError: false,
|
||||
node: makeNode("multi-pod-cidr-6-4", []string{"2000::/10", "192.168.0.0/16"}),
|
||||
},
|
||||
{
|
||||
expectError: false,
|
||||
node: makeNode("multi-pod-cidr-4-6", []string{"192.168.0.0/16", "2000::/10"}),
|
||||
},
|
||||
// error cases starts here
|
||||
{
|
||||
expectError: true,
|
||||
node: makeNode("invalid-pod-cidr", []string{"this-is-not-a-valid-cidr"}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
node: makeNode("duplicate-pod-cidr-4", []string{"10.0.0.1/16", "10.0.0.1/16"}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
node: makeNode("duplicate-pod-cidr-6", []string{"2000::/10", "2000::/10"}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
node: makeNode("not-a-dualstack-no-v4", []string{"2000::/10", "3000::/10"}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
node: makeNode("not-a-dualstack-no-v6", []string{"10.0.0.0/16", "10.1.0.0/16"}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
node: makeNode("not-a-dualstack-repeated-v6", []string{"2000::/10", "10.0.0.0/16", "3000::/10"}),
|
||||
},
|
||||
{
|
||||
expectError: true,
|
||||
node: makeNode("not-a-dualstack-repeated-v4", []string{"10.0.0.0/16", "3000::/10", "10.1.0.0/16"}),
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
errs := ValidateNode(&testCase.node)
|
||||
if len(errs) == 0 && testCase.expectError {
|
||||
t.Errorf("expected failure for %s, but there were none", testCase.node.Name)
|
||||
return
|
||||
}
|
||||
if len(errs) != 0 && !testCase.expectError {
|
||||
t.Errorf("expected success for %s, but there were errors: %v", testCase.node.Name, errs)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -696,7 +696,9 @@ func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) {
|
||||
if len(pod.Status.Message) > 0 {
|
||||
w.Write(LEVEL_0, "Message:\t%s\n", pod.Status.Message)
|
||||
}
|
||||
// remove when .IP field is depreciated
|
||||
w.Write(LEVEL_0, "IP:\t%s\n", pod.Status.PodIP)
|
||||
describePodIPs(pod, w, "")
|
||||
if controlledBy := printController(pod); len(controlledBy) > 0 {
|
||||
w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy)
|
||||
}
|
||||
@ -753,6 +755,17 @@ func printController(controllee metav1.Object) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func describePodIPs(pod *corev1.Pod, w PrefixWriter, space string) {
|
||||
if len(pod.Status.PodIPs) == 0 {
|
||||
w.Write(LEVEL_0, "%sIPs:\t<none>\n", space)
|
||||
return
|
||||
}
|
||||
w.Write(LEVEL_0, "%sIPs:\n", space)
|
||||
for _, ipInfo := range pod.Status.PodIPs {
|
||||
w.Write(LEVEL_1, "IP:\t%s\n", ipInfo.IP)
|
||||
}
|
||||
}
|
||||
|
||||
func describeVolumes(volumes []corev1.Volume, w PrefixWriter, space string) {
|
||||
if volumes == nil || len(volumes) == 0 {
|
||||
w.Write(LEVEL_0, "%sVolumes:\t<none>\n", space)
|
||||
@ -2949,9 +2962,14 @@ func describeNode(node *corev1.Node, nodeNonTerminatedPodsList *corev1.PodList,
|
||||
w.Write(LEVEL_0, " Kubelet Version:\t%s\n", node.Status.NodeInfo.KubeletVersion)
|
||||
w.Write(LEVEL_0, " Kube-Proxy Version:\t%s\n", node.Status.NodeInfo.KubeProxyVersion)
|
||||
|
||||
// remove when .PodCIDR is depreciated
|
||||
if len(node.Spec.PodCIDR) > 0 {
|
||||
w.Write(LEVEL_0, "PodCIDR:\t%s\n", node.Spec.PodCIDR)
|
||||
}
|
||||
|
||||
if len(node.Spec.PodCIDRs) > 0 {
|
||||
w.Write(LEVEL_0, "PodCIDRs:\t%s\n", strings.Join(node.Spec.PodCIDRs, ","))
|
||||
}
|
||||
if len(node.Spec.ProviderID) > 0 {
|
||||
w.Write(LEVEL_0, "ProviderID:\t%s\n", node.Spec.ProviderID)
|
||||
}
|
||||
|
@ -643,7 +643,10 @@ func printPod(pod *api.Pod, options printers.PrintOptions) ([]metav1.TableRow, e
|
||||
if options.Wide {
|
||||
nodeName := pod.Spec.NodeName
|
||||
nominatedNodeName := pod.Status.NominatedNodeName
|
||||
podIP := pod.Status.PodIP
|
||||
podIP := ""
|
||||
if len(pod.Status.PodIPs) > 0 {
|
||||
podIP = pod.Status.PodIPs[0].IP
|
||||
}
|
||||
|
||||
if podIP == "" {
|
||||
podIP = "<none>"
|
||||
|
@ -1703,8 +1703,49 @@ func TestPrintPodwide(t *testing.T) {
|
||||
Status: api.ConditionTrue,
|
||||
},
|
||||
},
|
||||
Phase: "podPhase",
|
||||
PodIP: "1.1.1.1",
|
||||
Phase: "podPhase",
|
||||
PodIPs: []api.PodIP{{IP: "1.1.1.1"}},
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
|
||||
{RestartCount: 3},
|
||||
},
|
||||
NominatedNodeName: "node1",
|
||||
},
|
||||
},
|
||||
[]metav1beta1.TableRow{{Cells: []interface{}{"test1", "1/2", "podPhase", int64(6), "<unknown>", "1.1.1.1", "test1", "node1", "1/3"}}},
|
||||
},
|
||||
{
|
||||
// Test when the NodeName and PodIP are not none
|
||||
api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test1"},
|
||||
Spec: api.PodSpec{
|
||||
Containers: make([]api.Container, 2),
|
||||
NodeName: "test1",
|
||||
ReadinessGates: []api.PodReadinessGate{
|
||||
{
|
||||
ConditionType: api.PodConditionType(condition1),
|
||||
},
|
||||
{
|
||||
ConditionType: api.PodConditionType(condition2),
|
||||
},
|
||||
{
|
||||
ConditionType: api.PodConditionType(condition3),
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
Conditions: []api.PodCondition{
|
||||
{
|
||||
Type: api.PodConditionType(condition1),
|
||||
Status: api.ConditionFalse,
|
||||
},
|
||||
{
|
||||
Type: api.PodConditionType(condition2),
|
||||
Status: api.ConditionTrue,
|
||||
},
|
||||
},
|
||||
Phase: "podPhase",
|
||||
PodIPs: []api.PodIP{{IP: "1.1.1.1"}, {IP: "2001:db8::"}},
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
|
||||
{RestartCount: 3},
|
||||
@ -1724,7 +1765,6 @@ func TestPrintPodwide(t *testing.T) {
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
Phase: "podPhase",
|
||||
PodIP: "",
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
|
||||
{State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerWaitingReason"}}, RestartCount: 3},
|
||||
|
@ -246,6 +246,7 @@ func TestCreateSetsFields(t *testing.T) {
|
||||
|
||||
func TestResourceLocation(t *testing.T) {
|
||||
expectedIP := "1.2.3.4"
|
||||
expectedIP6 := "2001:db8::"
|
||||
testCases := []struct {
|
||||
pod api.Pod
|
||||
query string
|
||||
@ -254,7 +255,7 @@ func TestResourceLocation(t *testing.T) {
|
||||
{
|
||||
pod: api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
|
||||
Status: api.PodStatus{PodIP: expectedIP},
|
||||
Status: api.PodStatus{PodIPs: []api.PodIP{{IP: expectedIP}}},
|
||||
},
|
||||
query: "foo",
|
||||
location: expectedIP,
|
||||
@ -262,7 +263,7 @@ func TestResourceLocation(t *testing.T) {
|
||||
{
|
||||
pod: api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
|
||||
Status: api.PodStatus{PodIP: expectedIP},
|
||||
Status: api.PodStatus{PodIPs: []api.PodIP{{IP: expectedIP}}},
|
||||
},
|
||||
query: "foo:12345",
|
||||
location: expectedIP + ":12345",
|
||||
@ -275,7 +276,7 @@ func TestResourceLocation(t *testing.T) {
|
||||
{Name: "ctr"},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{PodIP: expectedIP},
|
||||
Status: api.PodStatus{PodIPs: []api.PodIP{{IP: expectedIP}}},
|
||||
},
|
||||
query: "foo",
|
||||
location: expectedIP,
|
||||
@ -288,7 +289,7 @@ func TestResourceLocation(t *testing.T) {
|
||||
{Name: "ctr", Ports: []api.ContainerPort{{ContainerPort: 9376}}},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{PodIP: expectedIP},
|
||||
Status: api.PodStatus{PodIPs: []api.PodIP{{IP: expectedIP}}},
|
||||
},
|
||||
query: "foo",
|
||||
location: expectedIP + ":9376",
|
||||
@ -301,7 +302,7 @@ func TestResourceLocation(t *testing.T) {
|
||||
{Name: "ctr", Ports: []api.ContainerPort{{ContainerPort: 9376}}},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{PodIP: expectedIP},
|
||||
Status: api.PodStatus{PodIPs: []api.PodIP{{IP: expectedIP}}},
|
||||
},
|
||||
query: "foo:12345",
|
||||
location: expectedIP + ":12345",
|
||||
@ -315,7 +316,7 @@ func TestResourceLocation(t *testing.T) {
|
||||
{Name: "ctr2", Ports: []api.ContainerPort{{ContainerPort: 9376}}},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{PodIP: expectedIP},
|
||||
Status: api.PodStatus{PodIPs: []api.PodIP{{IP: expectedIP}}},
|
||||
},
|
||||
query: "foo",
|
||||
location: expectedIP + ":9376",
|
||||
@ -329,7 +330,21 @@ func TestResourceLocation(t *testing.T) {
|
||||
{Name: "ctr2", Ports: []api.ContainerPort{{ContainerPort: 1234}}},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{PodIP: expectedIP},
|
||||
Status: api.PodStatus{PodIPs: []api.PodIP{{IP: expectedIP}}},
|
||||
},
|
||||
query: "foo",
|
||||
location: expectedIP + ":9376",
|
||||
},
|
||||
{
|
||||
pod: api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{Name: "ctr1", Ports: []api.ContainerPort{{ContainerPort: 9376}}},
|
||||
{Name: "ctr2", Ports: []api.ContainerPort{{ContainerPort: 1234}}},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{PodIPs: []api.PodIP{{IP: expectedIP}, {IP: expectedIP6}}},
|
||||
},
|
||||
query: "foo",
|
||||
location: expectedIP + ":9376",
|
||||
@ -451,8 +466,46 @@ func TestConvertToTableList(t *testing.T) {
|
||||
Status: api.ConditionTrue,
|
||||
},
|
||||
},
|
||||
PodIP: "10.1.2.3",
|
||||
Phase: api.PodPending,
|
||||
PodIPs: []api.PodIP{{IP: "10.1.2.3"}},
|
||||
Phase: api.PodPending,
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{Name: "ctr1", State: api.ContainerState{Running: &api.ContainerStateRunning{}}, RestartCount: 10, Ready: true},
|
||||
{Name: "ctr2", State: api.ContainerState{Waiting: &api.ContainerStateWaiting{}}, RestartCount: 0},
|
||||
},
|
||||
NominatedNodeName: "nominated-node",
|
||||
},
|
||||
}
|
||||
|
||||
multiIPsPod := &api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "foo", CreationTimestamp: metav1.NewTime(time.Now().Add(-370 * 24 * time.Hour))},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{Name: "ctr1"},
|
||||
{Name: "ctr2", Ports: []api.ContainerPort{{ContainerPort: 9376}}},
|
||||
},
|
||||
NodeName: "test-node",
|
||||
ReadinessGates: []api.PodReadinessGate{
|
||||
{
|
||||
ConditionType: api.PodConditionType(condition1),
|
||||
},
|
||||
{
|
||||
ConditionType: api.PodConditionType(condition2),
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
Conditions: []api.PodCondition{
|
||||
{
|
||||
Type: api.PodConditionType(condition1),
|
||||
Status: api.ConditionFalse,
|
||||
},
|
||||
{
|
||||
Type: api.PodConditionType(condition2),
|
||||
Status: api.ConditionTrue,
|
||||
},
|
||||
},
|
||||
PodIPs: []api.PodIP{{IP: "10.1.2.3"}, {IP: "2001:db8::"}},
|
||||
Phase: api.PodPending,
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{Name: "ctr1", State: api.ContainerState{Running: &api.ContainerStateRunning{}}, RestartCount: 10, Ready: true},
|
||||
{Name: "ctr2", State: api.ContainerState{Waiting: &api.ContainerStateWaiting{}}, RestartCount: 0},
|
||||
@ -492,6 +545,15 @@ func TestConvertToTableList(t *testing.T) {
|
||||
in: &api.PodList{},
|
||||
out: &metav1beta1.Table{ColumnDefinitions: columns},
|
||||
},
|
||||
{
|
||||
in: multiIPsPod,
|
||||
out: &metav1beta1.Table{
|
||||
ColumnDefinitions: columns,
|
||||
Rows: []metav1beta1.TableRow{
|
||||
{Cells: []interface{}{"foo", "1/2", "Pending", int64(10), "370d", "10.1.2.3", "test-node", "nominated-node", "1/2"}, Object: runtime.RawExtension{Object: multiIPsPod}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for i, test := range testCases {
|
||||
out, err := storage.ConvertToTable(ctx, test.in, nil)
|
||||
@ -855,60 +917,89 @@ func TestEtcdUpdateStatus(t *testing.T) {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
podIn := api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: "machine",
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Image: "foo:v2",
|
||||
ImagePullPolicy: api.PullIfNotPresent,
|
||||
TerminationMessagePath: api.TerminationMessagePathDefault,
|
||||
podsIn := []api.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
SecurityContext: &api.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
Spec: api.PodSpec{
|
||||
NodeName: "machine",
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Image: "foo:v2",
|
||||
ImagePullPolicy: api.PullIfNotPresent,
|
||||
TerminationMessagePath: api.TerminationMessagePathDefault,
|
||||
},
|
||||
},
|
||||
SecurityContext: &api.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
Phase: api.PodRunning,
|
||||
PodIPs: []api.PodIP{{IP: "127.0.0.1"}},
|
||||
Message: "is now scheduled",
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
Phase: api.PodRunning,
|
||||
PodIP: "127.0.0.1",
|
||||
Message: "is now scheduled",
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: "machine",
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Image: "foo:v2",
|
||||
ImagePullPolicy: api.PullIfNotPresent,
|
||||
TerminationMessagePath: api.TerminationMessagePathDefault,
|
||||
},
|
||||
},
|
||||
SecurityContext: &api.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
Phase: api.PodRunning,
|
||||
PodIPs: []api.PodIP{{IP: "127.0.0.1"}, {IP: "2001:db8::"}},
|
||||
Message: "is now scheduled",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expected := podStart
|
||||
expected.ResourceVersion = "2"
|
||||
grace := int64(30)
|
||||
enableServiceLinks := v1.DefaultEnableServiceLinks
|
||||
expected.Spec.TerminationGracePeriodSeconds = &grace
|
||||
expected.Spec.RestartPolicy = api.RestartPolicyAlways
|
||||
expected.Spec.DNSPolicy = api.DNSClusterFirst
|
||||
expected.Spec.EnableServiceLinks = &enableServiceLinks
|
||||
expected.Spec.Containers[0].ImagePullPolicy = api.PullIfNotPresent
|
||||
expected.Spec.Containers[0].TerminationMessagePath = api.TerminationMessagePathDefault
|
||||
expected.Spec.Containers[0].TerminationMessagePolicy = api.TerminationMessageReadFile
|
||||
expected.Labels = podIn.Labels
|
||||
expected.Status = podIn.Status
|
||||
for _, podIn := range podsIn {
|
||||
expected := podStart
|
||||
expected.ResourceVersion = "2"
|
||||
grace := int64(30)
|
||||
enableServiceLinks := v1.DefaultEnableServiceLinks
|
||||
expected.Spec.TerminationGracePeriodSeconds = &grace
|
||||
expected.Spec.RestartPolicy = api.RestartPolicyAlways
|
||||
expected.Spec.DNSPolicy = api.DNSClusterFirst
|
||||
expected.Spec.EnableServiceLinks = &enableServiceLinks
|
||||
expected.Spec.Containers[0].ImagePullPolicy = api.PullIfNotPresent
|
||||
expected.Spec.Containers[0].TerminationMessagePath = api.TerminationMessagePathDefault
|
||||
expected.Spec.Containers[0].TerminationMessagePolicy = api.TerminationMessageReadFile
|
||||
expected.Labels = podIn.Labels
|
||||
expected.Status = podIn.Status
|
||||
|
||||
_, _, err = statusStorage.Update(ctx, podIn.Name, rest.DefaultUpdatedObjectInfo(&podIn), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
obj, err := storage.Get(ctx, "foo", &metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
podOut := obj.(*api.Pod)
|
||||
// Check to verify the Label, and Status updates match from change above. Those are the fields changed.
|
||||
if !apiequality.Semantic.DeepEqual(podOut.Spec, expected.Spec) ||
|
||||
!apiequality.Semantic.DeepEqual(podOut.Labels, expected.Labels) ||
|
||||
!apiequality.Semantic.DeepEqual(podOut.Status, expected.Status) {
|
||||
t.Errorf("objects differ: %v", diff.ObjectDiff(podOut, expected))
|
||||
_, _, err = statusStorage.Update(ctx, podIn.Name, rest.DefaultUpdatedObjectInfo(&podIn), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
obj, err := storage.Get(ctx, "foo", &metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
podOut := obj.(*api.Pod)
|
||||
// Check to verify the Label, and Status updates match from change above. Those are the fields changed.
|
||||
if !apiequality.Semantic.DeepEqual(podOut.Spec, expected.Spec) ||
|
||||
!apiequality.Semantic.DeepEqual(podOut.Labels, expected.Labels) ||
|
||||
!apiequality.Semantic.DeepEqual(podOut.Status, expected.Status) {
|
||||
t.Errorf("objects differ: %v", diff.ObjectDiff(podOut, expected))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -212,7 +212,12 @@ func PodToSelectableFields(pod *api.Pod) fields.Set {
|
||||
podSpecificFieldsSet["spec.schedulerName"] = string(pod.Spec.SchedulerName)
|
||||
podSpecificFieldsSet["spec.serviceAccountName"] = string(pod.Spec.ServiceAccountName)
|
||||
podSpecificFieldsSet["status.phase"] = string(pod.Status.Phase)
|
||||
podSpecificFieldsSet["status.podIP"] = string(pod.Status.PodIP)
|
||||
// TODO: add podIPs as a downward API value(s) with proper format
|
||||
podIP := ""
|
||||
if len(pod.Status.PodIPs) > 0 {
|
||||
podIP = string(pod.Status.PodIPs[0].IP)
|
||||
}
|
||||
podSpecificFieldsSet["status.podIP"] = podIP
|
||||
podSpecificFieldsSet["status.nominatedNodeName"] = string(pod.Status.NominatedNodeName)
|
||||
return generic.AddObjectMetaFieldsSet(podSpecificFieldsSet, &pod.ObjectMeta, true)
|
||||
}
|
||||
@ -259,7 +264,7 @@ func ResourceLocation(getter ResourceGetter, rt http.RoundTripper, ctx context.C
|
||||
}
|
||||
}
|
||||
|
||||
if err := proxyutil.IsProxyableIP(pod.Status.PodIP); err != nil {
|
||||
if err := proxyutil.IsProxyableIP(pod.Status.PodIPs[0].IP); err != nil {
|
||||
return nil, nil, errors.NewBadRequest(err.Error())
|
||||
}
|
||||
|
||||
@ -267,9 +272,9 @@ func ResourceLocation(getter ResourceGetter, rt http.RoundTripper, ctx context.C
|
||||
Scheme: scheme,
|
||||
}
|
||||
if port == "" {
|
||||
loc.Host = pod.Status.PodIP
|
||||
loc.Host = pod.Status.PodIPs[0].IP
|
||||
} else {
|
||||
loc.Host = net.JoinHostPort(pod.Status.PodIP, port)
|
||||
loc.Host = net.JoinHostPort(pod.Status.PodIPs[0].IP, port)
|
||||
}
|
||||
return loc, rt, nil
|
||||
}
|
||||
|
@ -116,14 +116,22 @@ func TestMatchPod(t *testing.T) {
|
||||
},
|
||||
{
|
||||
in: &api.Pod{
|
||||
Status: api.PodStatus{PodIP: "1.2.3.4"},
|
||||
Status: api.PodStatus{
|
||||
PodIPs: []api.PodIP{
|
||||
{IP: "1.2.3.4"},
|
||||
},
|
||||
},
|
||||
},
|
||||
fieldSelector: fields.ParseSelectorOrDie("status.podIP=1.2.3.4"),
|
||||
expectMatch: true,
|
||||
},
|
||||
{
|
||||
in: &api.Pod{
|
||||
Status: api.PodStatus{PodIP: "1.2.3.4"},
|
||||
Status: api.PodStatus{
|
||||
PodIPs: []api.PodIP{
|
||||
{IP: "1.2.3.4"},
|
||||
},
|
||||
},
|
||||
},
|
||||
fieldSelector: fields.ParseSelectorOrDie("status.podIP=4.3.2.1"),
|
||||
expectMatch: false,
|
||||
@ -141,7 +149,30 @@ func TestMatchPod(t *testing.T) {
|
||||
},
|
||||
fieldSelector: fields.ParseSelectorOrDie("status.nominatedNodeName=node2"),
|
||||
expectMatch: false,
|
||||
}}
|
||||
},
|
||||
{
|
||||
in: &api.Pod{
|
||||
Status: api.PodStatus{
|
||||
PodIPs: []api.PodIP{
|
||||
{IP: "2001:db8::"},
|
||||
},
|
||||
},
|
||||
},
|
||||
fieldSelector: fields.ParseSelectorOrDie("status.podIP=2001:db8::"),
|
||||
expectMatch: true,
|
||||
},
|
||||
{
|
||||
in: &api.Pod{
|
||||
Status: api.PodStatus{
|
||||
PodIPs: []api.PodIP{
|
||||
{IP: "2001:db8::"},
|
||||
},
|
||||
},
|
||||
},
|
||||
fieldSelector: fields.ParseSelectorOrDie("status.podIP=2001:db7::"),
|
||||
expectMatch: false,
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
m := MatchPod(labels.Everything(), testCase.fieldSelector)
|
||||
result, err := m.Matches(testCase.in)
|
||||
|
@ -539,8 +539,8 @@ func isValidAddress(ctx context.Context, addr *api.EndpointAddress, pods rest.Ge
|
||||
if pod == nil {
|
||||
return fmt.Errorf("pod is missing, skipping (%s/%s)", addr.TargetRef.Namespace, addr.TargetRef.Name)
|
||||
}
|
||||
if pod.Status.PodIP != addr.IP {
|
||||
return fmt.Errorf("pod ip doesn't match endpoint ip, skipping: %s vs %s (%s/%s)", pod.Status.PodIP, addr.IP, addr.TargetRef.Namespace, addr.TargetRef.Name)
|
||||
if pod.Status.PodIPs[0].IP != addr.IP {
|
||||
return fmt.Errorf("pod ip doesn't match endpoint ip, skipping: %s vs %s (%s/%s)", pod.Status.PodIPs[0].IP, addr.IP, addr.TargetRef.Namespace, addr.TargetRef.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1180,7 +1180,7 @@ func TestServiceRegistryResourceLocation(t *testing.T) {
|
||||
Containers: []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent, TerminationMessagePolicy: api.TerminationMessageReadFile}},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
PodIP: "1.2.3.4",
|
||||
PodIPs: []api.PodIP{{IP: "1.2.3.4"}, {IP: "2001:db7::"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -1194,7 +1194,7 @@ func TestServiceRegistryResourceLocation(t *testing.T) {
|
||||
Containers: []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent, TerminationMessagePolicy: api.TerminationMessageReadFile}},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
PodIP: "1.2.3.5",
|
||||
PodIPs: []api.PodIP{{IP: "1.2.3.5"}, {IP: "2001:db8::"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -3126,6 +3126,14 @@ type PodDNSConfigOption struct {
|
||||
Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
|
||||
}
|
||||
|
||||
// PodIP represents a single IP address allocated to a pod, used as the entry
// type of the (plural) PodStatus.PodIPs field.
// Each entry includes:
//	IP: An IP address allocated to the pod. Routable at least within the cluster.
type PodIP struct {
	// ip is an IP address (IPv4 or IPv6) assigned to the pod
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
}
|
||||
|
||||
// PodStatus represents information about the status of a pod. Status may trail the actual
|
||||
// state of a system, especially if the node that hosts the pod cannot contact the control
|
||||
// plane.
|
||||
@ -3181,6 +3189,14 @@ type PodStatus struct {
|
||||
// +optional
|
||||
PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
|
||||
|
||||
// podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must
|
||||
// match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list
|
||||
// is empty if no IPs have been allocated yet.
|
||||
// +optional
|
||||
// +patchStrategy=merge
|
||||
// +patchMergeKey=ip
|
||||
PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"`
|
||||
|
||||
// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
|
||||
// This is before the Kubelet pulled the container image(s) for the pod.
|
||||
// +optional
|
||||
@ -3901,6 +3917,14 @@ type NodeSpec struct {
|
||||
// PodCIDR represents the pod IP range assigned to the node.
|
||||
// +optional
|
||||
PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
|
||||
|
||||
// podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this
|
||||
// field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for
|
||||
// each of IPv4 and IPv6.
|
||||
// +optional
|
||||
// +patchStrategy=merge
|
||||
PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"`
|
||||
|
||||
// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
|
||||
// +optional
|
||||
ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
|
||||
|
Loading…
Reference in New Issue
Block a user