Merge pull request #14508 from lavalamp/fix-14477
Fix bug with empty subsets
commit 42cb619471
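Context for the fix below: the referenced issue (#14477) concerns service location lookups against Endpoints whose subsets list ports but no addresses. A minimal, illustrative Go sketch of why such a subset cannot be used for a random address pick; this is not the registry code, only the failure mode in isolation:

package main

import (
	"fmt"
	"math/rand"
)

// Picking a "random" address from a subset that has ports but no addresses:
// rand.Intn panics when its argument is zero, so an empty Addresses slice
// brings the lookup down before any address can be returned.
func main() {
	addresses := []string{} // subset with ports but no ready addresses

	defer func() {
		if r := recover(); r != nil {
			fmt.Println("panic:", r) // panic: invalid argument to Intn
		}
	}()
	_ = addresses[rand.Intn(len(addresses))]
}

The first hunk below guards against exactly this by skipping subsets with no addresses.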
@@ -295,6 +295,9 @@ func (rs *REST) ResourceLocation(ctx api.Context, id string) (*url.URL, http.Rou
 	// Find a Subset that has the port.
 	for ssi := 0; ssi < len(eps.Subsets); ssi++ {
 		ss := &eps.Subsets[(ssSeed+ssi)%len(eps.Subsets)]
+		if len(ss.Addresses) == 0 {
+			continue
+		}
 		for i := range ss.Ports {
 			if ss.Ports[i].Name == portStr {
 				// Pick a random address.
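The three added lines make ResourceLocation skip any subset whose Addresses list is empty before it tries to match a port and pick an address. A standalone sketch of the selection loop after the fix, using simplified stand-in types (Subset, Port, and pickAddress here are illustrative, not the real api types or registry function):

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for api.EndpointSubset / api.EndpointPort.
type Port struct {
	Name string
	Port int
}

type Subset struct {
	Addresses []string
	Ports     []Port
}

// pickAddress mirrors the fixed loop: starting from a seeded offset, walk the
// subsets, skip any subset with no addresses, and return the first address
// that exposes the named port.
func pickAddress(subsets []Subset, portName string, seed int) (string, int, error) {
	for ssi := 0; ssi < len(subsets); ssi++ {
		ss := &subsets[(seed+ssi)%len(subsets)]
		if len(ss.Addresses) == 0 {
			continue // the guard added by this commit
		}
		for i := range ss.Ports {
			if ss.Ports[i].Name == portName {
				// The real code picks a random address; take the first one here.
				return ss.Addresses[0], ss.Ports[i].Port, nil
			}
		}
	}
	return "", 0, errors.New("no endpoints available")
}

func main() {
	subsets := []Subset{
		{Addresses: nil, Ports: []Port{{Name: "p", Port: 93}}}, // skipped: no addresses
		{Addresses: []string{"1.2.3.4"}, Ports: []Port{{Name: "p", Port: 93}}},
	}
	fmt.Println(pickAddress(subsets, "p", 0))
}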
@@ -440,6 +440,22 @@ func TestServiceRegistryResourceLocation(t *testing.T) {
 				Ports:     []api.EndpointPort{{Name: "", Port: 80}, {Name: "p", Port: 93}},
 			}},
 		},
+		{
+			ObjectMeta: api.ObjectMeta{
+				Name:      "foo",
+				Namespace: api.NamespaceDefault,
+			},
+			Subsets: []api.EndpointSubset{{
+				Addresses: []api.EndpointAddress{},
+				Ports:     []api.EndpointPort{{Name: "", Port: 80}, {Name: "p", Port: 93}},
+			}, {
+				Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
+				Ports:     []api.EndpointPort{{Name: "", Port: 80}, {Name: "p", Port: 93}},
+			}, {
+				Addresses: []api.EndpointAddress{{IP: "1.2.3.5"}},
+				Ports:     []api.EndpointPort{},
+			}},
+		},
 	},
 	}
 	storage, registry := NewTestREST(t, endpoints)
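The added fixture gives the test an Endpoints item whose first subset has ports but no addresses and whose last subset has an address but no ports, so only the middle subset (1.2.3.4) is usable and the lookup must skip the other two. Building on the pickAddress sketch above (same hypothetical package, in a companion _test.go file), an equivalent check could look like:

package main

import "testing"

// Mirrors the new fixture: an empty-address subset first, a usable subset in
// the middle, and a subset with no ports last. Assumes the Subset, Port, and
// pickAddress definitions from the sketch above.
func TestPickAddressSkipsEmptySubsets(t *testing.T) {
	subsets := []Subset{
		{Addresses: []string{}, Ports: []Port{{Name: "", Port: 80}, {Name: "p", Port: 93}}},
		{Addresses: []string{"1.2.3.4"}, Ports: []Port{{Name: "", Port: 80}, {Name: "p", Port: 93}}},
		{Addresses: []string{"1.2.3.5"}, Ports: []Port{}},
	}
	// Try every seed so the rotation can start at any subset.
	for seed := 0; seed < len(subsets); seed++ {
		addr, port, err := pickAddress(subsets, "p", seed)
		if err != nil {
			t.Fatalf("seed %d: unexpected error: %v", seed, err)
		}
		if addr != "1.2.3.4" || port != 93 {
			t.Errorf("seed %d: got %s:%d, want 1.2.3.4:93", seed, addr, port)
		}
	}
}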
@@ -57,7 +57,7 @@ var _ = Describe("Autoscaling", func() {

 		// Consume 60% CPU
 		millicoresPerReplica := 600
-		rc := NewStaticResourceConsumer("cpu-utilization", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f)
+		rc := NewStaticResourceConsumer("rc", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f)
 		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))

 		rc.CleanUp()
@@ -79,7 +79,7 @@ var _ = Describe("Autoscaling", func() {

 		// Consume 60% of total memory capacity
 		megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode)
-		rc := NewStaticResourceConsumer("mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f)
+		rc := NewStaticResourceConsumer("rc", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f)
 		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))

 		rc.CleanUp()
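For reference, the consumer totals in the two hunks above work out to roughly 60% of cluster capacity, matching the comments: one replica per core at 600 millicores gives 60% of the 1000 millicores per core, and megabytesPerReplica is 60% of node memory divided across the cores of a node. A small sketch of that arithmetic with example cluster numbers (the figures below are illustrative, not taken from the test):

package main

import "fmt"

// Illustrative arithmetic for the targets used in the autoscaling e2e test;
// variable names mirror the diff, cluster sizes are made-up examples.
func main() {
	nodeCount := 3        // example cluster size
	coresPerNode := 2     // example cores per node
	memCapacityMb := 7500 // example node memory capacity in MB

	replicas := nodeCount * coresPerNode

	// CPU: 600 millicores per replica, one replica per core => 60% of capacity.
	millicoresPerReplica := 600
	totalMillicores := millicoresPerReplica * replicas
	capacityMillicores := nodeCount * coresPerNode * 1000
	fmt.Printf("CPU: %d of %d millicores (%.0f%%)\n",
		totalMillicores, capacityMillicores, 100*float64(totalMillicores)/float64(capacityMillicores))

	// Memory: 60% of per-node capacity split across the replicas on that node.
	megabytesPerReplica := memCapacityMb * 6 / 10 / coresPerNode
	totalMegabytes := megabytesPerReplica * replicas
	capacityMegabytes := nodeCount * memCapacityMb
	fmt.Printf("Memory: %d of %d MB (~%.0f%%)\n",
		totalMegabytes, capacityMegabytes, 100*float64(totalMegabytes)/float64(capacityMegabytes))
}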
@@ -262,6 +262,4 @@ func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, repli
 		MemLimit: memLimitMb * 1024 * 1024, // MemLimit is in bytes
 	}
 	expectNoError(RunRC(config))
-	// Wait until endpoints are processed. Temporary workaround until #14477 is fixed.
-	time.Sleep(10 * time.Second)
 }
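With the registry fix in place, the fixed 10-second sleep that papered over #14477 is simply dropped. If a wait were still needed, the usual replacement for a hard sleep is a bounded poll; the sketch below is a generic illustration of that pattern, not code from this commit:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor is an illustrative stand-in for a bounded poll a test might use
// instead of a fixed sleep: retry the condition at an interval until it
// succeeds, errors, or the timeout expires.
func waitFor(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		ok, err := condition()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	err := waitFor(100*time.Millisecond, 2*time.Second, func() (bool, error) {
		// In a real test this would check that the Endpoints object has addresses.
		return time.Since(start) > 300*time.Millisecond, nil
	})
	fmt.Println("done:", err)
}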