Merge pull request #327 from lavalamp/etcdChanFix
Improvements to integration test & logging
This commit is contained in: commit 664c9faa43
@@ -30,25 +30,18 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/master"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 	"github.com/coreos/go-etcd/etcd"
 	"github.com/golang/glog"
 )
 
-func main() {
-	runtime.GOMAXPROCS(4)
-	util.ReallyCrash = true
-	util.InitLogs()
-	defer util.FlushLogs()
+var (
+	fakeDocker1, fakeDocker2 kubelet.FakeDockerClient
+)
 
-	go func() {
-		defer util.FlushLogs()
-		time.Sleep(3 * time.Minute)
-		glog.Fatalf("This test has timed out.")
-	}()
-
-	manifestUrl := ServeCachedManifestFile()
+func startComponents(manifestURL string) (apiServerURL string) {
 	// Setup
 	servers := []string{"http://localhost:4001"}
 	glog.Infof("Creating etcd client pointing to %v", servers)
@@ -63,23 +56,21 @@ func main() {
 	controllerManager.Run(1 * time.Second)
 
 	// Kubelet
-	fakeDocker1 := &kubelet.FakeDockerClient{}
 	myKubelet := kubelet.Kubelet{
 		Hostname:           machineList[0],
-		DockerClient:       fakeDocker1,
+		DockerClient:       &fakeDocker1,
 		DockerPuller:       &kubelet.FakeDockerPuller{},
 		FileCheckFrequency: 5 * time.Second,
 		SyncFrequency:      5 * time.Second,
 		HTTPCheckFrequency: 5 * time.Second,
 	}
-	go myKubelet.RunKubelet("", manifestUrl, servers[0], "localhost", "", 0)
+	go myKubelet.RunKubelet("", manifestURL, servers[0], "localhost", "", 0)
 
 	// Create a second kubelet so that the guestbook example's two redis slaves both
 	// have a place they can schedule.
-	fakeDocker2 := &kubelet.FakeDockerClient{}
 	otherKubelet := kubelet.Kubelet{
 		Hostname:           machineList[1],
-		DockerClient:       fakeDocker2,
+		DockerClient:       &fakeDocker2,
 		DockerPuller:       &kubelet.FakeDockerPuller{},
 		FileCheckFrequency: 5 * time.Second,
 		SyncFrequency:      5 * time.Second,
@@ -87,12 +78,10 @@ func main() {
 	}
 	go otherKubelet.RunKubelet("", "", servers[0], "localhost", "", 0)
 
-	// Ok. we're good to go.
-	glog.Infof("API Server started on %s", apiserver.URL)
-	// Wait for the synchronization threads to come up.
-	time.Sleep(time.Second * 10)
+	return apiserver.URL
+}
 
-	kubeClient := client.New(apiserver.URL, nil)
+func runReplicationControllerTest(kubeClient *client.Client) {
 	data, err := ioutil.ReadFile("api/examples/controller.json")
 	if err != nil {
 		glog.Fatalf("Unexpected error: %#v", err)
@@ -109,31 +98,57 @@ func main() {
 	time.Sleep(time.Second * 10)
 
 	// Validate that they're truly up.
-	pods, err := kubeClient.ListPods(nil)
-	if err != nil || len(pods.Items) != 2 {
-		glog.Fatal("FAILED: %#v", pods.Items)
+	pods, err := kubeClient.ListPods(labels.Set(controllerRequest.DesiredState.ReplicaSelector).AsSelector())
+	if err != nil || len(pods.Items) != controllerRequest.DesiredState.Replicas {
+		glog.Fatalf("FAILED: %#v", pods.Items)
 	}
 	glog.Infof("Replication controller produced:\n\n%#v\n\n", pods)
+}
+
+func main() {
+	runtime.GOMAXPROCS(4)
+	util.ReallyCrash = true
+	util.InitLogs()
+	defer util.FlushLogs()
+
+	go func() {
+		defer util.FlushLogs()
+		time.Sleep(3 * time.Minute)
+		glog.Fatalf("This test has timed out.")
+	}()
+
+	manifestURL := ServeCachedManifestFile()
+
+	apiServerURL := startComponents(manifestURL)
+
+	// Ok. we're good to go.
+	glog.Infof("API Server started on %s", apiServerURL)
+	// Wait for the synchronization threads to come up.
+	time.Sleep(time.Second * 10)
+
+	kubeClient := client.New(apiServerURL, nil)
+	runReplicationControllerTest(kubeClient)
+
 	// Check that kubelet tried to make the pods.
 	// Using a set to list unique creation attempts. Our fake is
 	// really stupid, so kubelet tries to create these multiple times.
-	createdPods := map[string]struct{}{}
+	createdPods := util.StringSet{}
 	for _, p := range fakeDocker1.Created {
 		// The last 8 characters are random, so slice them off.
 		if n := len(p); n > 8 {
-			createdPods[p[:n-8]] = struct{}{}
+			createdPods.Insert(p[:n-8])
 		}
 	}
 	for _, p := range fakeDocker2.Created {
 		// The last 8 characters are random, so slice them off.
 		if n := len(p); n > 8 {
-			createdPods[p[:n-8]] = struct{}{}
+			createdPods.Insert(p[:n-8])
 		}
 	}
 	// We expect 5: 2 net containers + 2 pods from the replication controller +
 	// 1 net container + 2 pods from the URL.
 	if len(createdPods) != 7 {
-		glog.Fatalf("Unexpected list of created pods: %#v %#v %#v\n", createdPods, fakeDocker1.Created, fakeDocker2.Created)
+		glog.Fatalf("Unexpected list of created pods:\n\n%#v\n\n%#v\n\n%#v\n\n", createdPods.List(), fakeDocker1.Created, fakeDocker2.Created)
 	}
 	glog.Infof("OK")
 }
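A side note on the created-pod check above: the test dedupes Docker creation attempts by trimming the random 8-character suffix from each container name and inserting the prefix into a string set. The sketch below mirrors that pattern in a self-contained program; the StringSet here is a local stand-in for util.StringSet, and the container names are invented for illustration.

```go
package main

import (
	"fmt"
	"sort"
)

// StringSet is a local stand-in for util.StringSet: a set of strings
// backed by a map for cheap membership tests.
type StringSet map[string]struct{}

// Insert adds an item to the set.
func (s StringSet) Insert(item string) { s[item] = struct{}{} }

// List returns the contents as a sorted string slice, mirroring the
// List method added to util.StringSet in this commit.
func (s StringSet) List() []string {
	res := make([]string, 0, len(s))
	for key := range s {
		res = append(res, key)
	}
	sort.Strings(res)
	return res
}

func main() {
	// Invented container names: the fake Docker client records every
	// creation attempt, and the last 8 characters are random.
	created := []string{"redis-master12345678", "redis-master87654321", "net11223344"}

	createdPods := StringSet{}
	for _, p := range created {
		// Trim the random suffix so repeated creation attempts collapse
		// into one entry, exactly as the integration test does.
		if n := len(p); n > 8 {
			createdPods.Insert(p[:n-8])
		}
	}
	fmt.Println(createdPods.List()) // [net redis-master]
}
```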
@ -643,14 +643,14 @@ func (kl *Kubelet) syncManifest(manifest *api.ContainerManifest, keepChannel cha
|
||||
// Make sure we have a network container
|
||||
netId, err := kl.getNetworkContainerId(manifest)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to introspect network container. (%v) Skipping container %s", err, manifest.ID)
|
||||
glog.Errorf("Failed to introspect network container. (%v) Skipping manifest %s", err, manifest.ID)
|
||||
return err
|
||||
}
|
||||
if netId == "" {
|
||||
glog.Infof("Network container doesn't exist, creating")
|
||||
netId, err = kl.createNetworkContainer(manifest)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to introspect network container. (%v) Skipping container %s", err, manifest.ID)
|
||||
glog.Errorf("Failed to introspect network container. (%v) Skipping manifest %s", err, manifest.ID)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -658,24 +658,24 @@ func (kl *Kubelet) syncManifest(manifest *api.ContainerManifest, keepChannel cha
 	for _, container := range manifest.Containers {
 		containerId, err := kl.getContainerId(manifest, &container)
 		if err != nil {
-			glog.Errorf("Error finding container: %v skipping id %s.", err, manifest.ID)
+			glog.Errorf("Error finding container: %v skipping manifest %s container %s.", err, manifest.ID, container.Name)
 			continue
 		}
 		if containerId == "" {
 			glog.Infof("%+v doesn't exist, creating", container)
 			kl.DockerPuller.Pull(container.Image)
 			if err != nil {
-				glog.Errorf("Failed to create container: %v Skipping container %s", err, manifest.ID)
+				glog.Errorf("Failed to create container: %v skipping manifest %s container %s.", err, manifest.ID, container.Name)
				continue
 			}
 			containerId, err = kl.runContainer(manifest, &container, "container:"+string(netId))
 			if err != nil {
 				// TODO(bburns) : Perhaps blacklist a container after N failures?
-				glog.Errorf("Error running container: %v skipping.", err)
+				glog.Errorf("Error running manifest %s container %s: %v", manifest.ID, container.Name, err)
 				continue
 			}
 		} else {
-			glog.V(1).Infof("%s exists as %v", container.Name, containerId)
+			glog.V(1).Infof("manifest %s container %s exists as %v", manifest.ID, container.Name, containerId)
 		}
 		keepChannel <- containerId
 	}
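A side note on the syncManifest logging changes above: every message in the per-manifest sync loop now names both the manifest and the container it concerns, so output interleaved from the two test kubelets can be attributed. A minimal self-contained sketch of that convention, using the standard log package instead of glog and invented identifiers:

```go
package main

import (
	"errors"
	"log"
)

// logSyncError is a hypothetical helper illustrating the convention the diff
// moves toward: always log the manifest ID and container name with the error.
func logSyncError(manifestID, containerName string, err error) {
	log.Printf("Error syncing manifest %s container %s: %v", manifestID, containerName, err)
}

func main() {
	// Invented values purely for illustration.
	logSyncError("redis-master", "master", errors.New("image pull failed"))
}
```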
@@ -16,6 +16,10 @@ limitations under the License.
 
 package util
 
+import (
+	"sort"
+)
+
 type empty struct{}
 
 // A set of strings, implemented via map[string]struct{} for minimal memory consumption.
@@ -45,3 +49,13 @@ func (s StringSet) Has(item string) bool {
 	_, contained := s[item]
 	return contained
 }
+
+// Return the contents as a sorted string slice.
+func (s StringSet) List() []string {
+	res := make([]string, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	sort.StringSlice(res).Sort()
+	return res
+}
@@ -17,6 +17,7 @@ limitations under the License.
 package util
 
 import (
+	"reflect"
 	"testing"
 )
 
@@ -51,3 +52,10 @@ func TestNewStringSet(t *testing.T) {
 		t.Errorf("Unexpected contents: %#v", s)
 	}
 }
+
+func TestStringSetList(t *testing.T) {
+	s := NewStringSet("z", "y", "x", "a")
+	if !reflect.DeepEqual(s.List(), []string{"a", "x", "y", "z"}) {
+		t.Errorf("List gave unexpected result: %#v", s.List())
+	}
+}
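A side note on why the new List method sorts before returning: Go randomizes map iteration order, so TestStringSetList above can only compare against a fixed slice because the result is first put into a deterministic order. A small self-contained illustration (the map here is a stand-in, not the util package itself):

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

func main() {
	set := map[string]struct{}{"z": {}, "y": {}, "x": {}, "a": {}}

	// Collect the keys; their order is unspecified because Go randomizes
	// map iteration.
	keys := make([]string, 0, len(set))
	for k := range set {
		keys = append(keys, k)
	}

	// Without sorting, this comparison would pass or fail at random.
	sort.Strings(keys)
	fmt.Println(reflect.DeepEqual(keys, []string{"a", "x", "y", "z"})) // true
}
```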