Merge pull request #40495 from yujuhong/fnv_hash

Automatic merge from submit-queue (batch tested with PRs 38739, 40480, 40495, 40172, 40393)

Use fnv hash in the CRI implementation

fnv is more stable than adler. This PR changes the CRI implementation to
use fnv for generating container hashes, while leaving the old
implementations (dockertools/rkt) unchanged. This is because the hash is
what the kubelet uses to identify a container -- any change to the hash will
cause the kubelet to restart existing containers. This is acceptable for the
CRI implementation (which already requires a disruptive upgrade), but not for
the older implementations.

#40140
This commit is contained in:
Kubernetes Submit Queue 2017-01-25 21:20:31 -08:00 committed by GitHub
commit 52863de51d
7 changed files with 26 additions and 16 deletions

View File

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"hash/adler32" "hash/adler32"
"hash/fnv"
"strings" "strings"
"time" "time"
@ -91,6 +92,15 @@ func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus
// HashContainer returns the hash of the container. It is used to compare // HashContainer returns the hash of the container. It is used to compare
// the running container with its desired spec. // the running container with its desired spec.
func HashContainer(container *v1.Container) uint64 { func HashContainer(container *v1.Container) uint64 {
hash := fnv.New32a()
hashutil.DeepHashObject(hash, *container)
return uint64(hash.Sum32())
}
// HashContainerLegacy returns the hash of the container. It is used to compare
// the running container with its desired spec.
// TODO: Delete this function after we deprecate dockertools.
func HashContainerLegacy(container *v1.Container) uint64 {
hash := adler32.New() hash := adler32.New()
hashutil.DeepHashObject(hash, *container) hashutil.DeepHashObject(hash, *container)
return uint64(hash.Sum32()) return uint64(hash.Sum32())

View File

@ -302,7 +302,7 @@ func (p dockerPuller) GetImageRef(image string) (string, error) {
// only occur when instances of the same container in the same pod have the same UID. The // only occur when instances of the same container in the same pod have the same UID. The
// chance is really slim. // chance is really slim.
func BuildDockerName(dockerName KubeletContainerName, container *v1.Container) (string, string, string) { func BuildDockerName(dockerName KubeletContainerName, container *v1.Container) (string, string, string) {
containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16) containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16)
stableName := fmt.Sprintf("%s_%s_%s_%s", stableName := fmt.Sprintf("%s_%s_%s_%s",
containerNamePrefix, containerNamePrefix,
containerName, containerName,

View File

@ -1075,7 +1075,7 @@ func (dm *DockerManager) podInfraContainerChanged(pod *v1.Pod, podInfraContainer
ImagePullPolicy: podInfraContainerImagePullPolicy, ImagePullPolicy: podInfraContainerImagePullPolicy,
Env: dm.podInfraContainerEnv, Env: dm.podInfraContainerEnv,
} }
return podInfraContainerStatus.Hash != kubecontainer.HashContainer(expectedPodInfraContainer), nil return podInfraContainerStatus.Hash != kubecontainer.HashContainerLegacy(expectedPodInfraContainer), nil
} }
// determine if the container root should be a read only filesystem. // determine if the container root should be a read only filesystem.
@ -2087,7 +2087,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *v1.Pod, podStatus *kube
// At this point, the container is running and pod infra container is good. // At this point, the container is running and pod infra container is good.
// We will look for changes and check healthiness for the container. // We will look for changes and check healthiness for the container.
expectedHash := kubecontainer.HashContainer(&container) expectedHash := kubecontainer.HashContainerLegacy(&container)
hash := containerStatus.Hash hash := containerStatus.Hash
containerChanged := hash != 0 && hash != expectedHash containerChanged := hash != 0 && hash != expectedHash
if containerChanged { if containerChanged {

View File

@ -585,7 +585,7 @@ func generatePodInfraContainerHash(pod *v1.Pod) uint64 {
Ports: ports, Ports: ports,
ImagePullPolicy: podInfraContainerImagePullPolicy, ImagePullPolicy: podInfraContainerImagePullPolicy,
} }
return kubecontainer.HashContainer(container) return kubecontainer.HashContainerLegacy(container)
} }
// runSyncPod is a helper function to retrieve the running pods from the fake // runSyncPod is a helper function to retrieve the running pods from the fake
@ -855,7 +855,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
fakeDocker.SetFakeRunningContainers([]*FakeContainer{ fakeDocker.SetFakeRunningContainers([]*FakeContainer{
{ {
ID: "1234", ID: "1234",
Name: "/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&container), 16) + "_foo_new_12345678_0", Name: "/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&container), 16) + "_foo_new_12345678_0",
}, },
{ {
ID: "9876", ID: "9876",
@ -885,14 +885,14 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
}, },
{ {
ID: "1234", ID: "1234",
Name: "/k8s_succeeded." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0", Name: "/k8s_succeeded." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&containers[0]), 16) + "_foo_new_12345678_0",
ExitCode: 0, ExitCode: 0,
StartedAt: time.Now(), StartedAt: time.Now(),
FinishedAt: time.Now(), FinishedAt: time.Now(),
}, },
{ {
ID: "5678", ID: "5678",
Name: "/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0", Name: "/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&containers[1]), 16) + "_foo_new_12345678_0",
ExitCode: 42, ExitCode: 42,
StartedAt: time.Now(), StartedAt: time.Now(),
FinishedAt: time.Now(), FinishedAt: time.Now(),
@ -964,7 +964,7 @@ func TestSyncPodBackoff(t *testing.T) {
Containers: containers, Containers: containers,
}) })
stableId := "k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_new_12345678" stableId := "k8s_bad." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&containers[1]), 16) + "_podfoo_new_12345678"
dockerContainers := []*FakeContainer{ dockerContainers := []*FakeContainer{
{ {
ID: "9876", ID: "9876",
@ -974,13 +974,13 @@ func TestSyncPodBackoff(t *testing.T) {
}, },
{ {
ID: "1234", ID: "1234",
Name: "/k8s_good." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_podfoo_new_12345678_0", Name: "/k8s_good." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&containers[0]), 16) + "_podfoo_new_12345678_0",
StartedAt: startTime, StartedAt: startTime,
Running: true, Running: true,
}, },
{ {
ID: "5678", ID: "5678",
Name: "/k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_new_12345678_0", Name: "/k8s_bad." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&containers[1]), 16) + "_podfoo_new_12345678_0",
ExitCode: 42, ExitCode: 42,
StartedAt: startTime, StartedAt: startTime,
FinishedAt: fakeClock.Now(), FinishedAt: fakeClock.Now(),

View File

@ -82,7 +82,7 @@ func newLabels(container *v1.Container, pod *v1.Pod, restartCount int, enableCus
} }
labels[types.KubernetesContainerNameLabel] = container.Name labels[types.KubernetesContainerNameLabel] = container.Name
labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16) labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16)
labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount) labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath
labels[kubernetesContainerTerminationMessagePolicyLabel] = string(container.TerminationMessagePolicy) labels[kubernetesContainerTerminationMessagePolicyLabel] = string(container.TerminationMessagePolicy)

View File

@ -90,7 +90,7 @@ func TestLabels(t *testing.T) {
PodDeletionGracePeriod: pod.DeletionGracePeriodSeconds, PodDeletionGracePeriod: pod.DeletionGracePeriodSeconds,
PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds, PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds,
Name: container.Name, Name: container.Name,
Hash: strconv.FormatUint(kubecontainer.HashContainer(container), 16), Hash: strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16),
RestartCount: restartCount, RestartCount: restartCount,
TerminationMessagePath: container.TerminationMessagePath, TerminationMessagePath: container.TerminationMessagePath,
PreStopHandler: container.Lifecycle.PreStop, PreStopHandler: container.Lifecycle.PreStop,
@ -113,7 +113,7 @@ func TestLabels(t *testing.T) {
expected.PodTerminationGracePeriod = nil expected.PodTerminationGracePeriod = nil
expected.PreStopHandler = nil expected.PreStopHandler = nil
// Because container is changed, the Hash should be updated // Because container is changed, the Hash should be updated
expected.Hash = strconv.FormatUint(kubecontainer.HashContainer(container), 16) expected.Hash = strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16)
labels = newLabels(container, pod, restartCount, false) labels = newLabels(container, pod, restartCount, false)
containerInfo = getContainerInfoFromLabel(labels) containerInfo = getContainerInfoFromLabel(labels)
if !reflect.DeepEqual(containerInfo, expected) { if !reflect.DeepEqual(containerInfo, expected) {

View File

@ -772,7 +772,7 @@ func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP string, c v1.Container, r
var annotations appctypes.Annotations = []appctypes.Annotation{ var annotations appctypes.Annotations = []appctypes.Annotation{
{ {
Name: *appctypes.MustACIdentifier(k8sRktContainerHashAnno), Name: *appctypes.MustACIdentifier(k8sRktContainerHashAnno),
Value: strconv.FormatUint(kubecontainer.HashContainer(&c), 10), Value: strconv.FormatUint(kubecontainer.HashContainerLegacy(&c), 10),
}, },
{ {
Name: *appctypes.MustACIdentifier(types.KubernetesContainerNameLabel), Name: *appctypes.MustACIdentifier(types.KubernetesContainerNameLabel),
@ -923,7 +923,7 @@ func apiPodToruntimePod(uuid string, pod *v1.Pod) *kubecontainer.Pod {
ID: buildContainerID(&containerID{uuid, c.Name}), ID: buildContainerID(&containerID{uuid, c.Name}),
Name: c.Name, Name: c.Name,
Image: c.Image, Image: c.Image,
Hash: kubecontainer.HashContainer(c), Hash: kubecontainer.HashContainerLegacy(c),
}) })
} }
return p return p
@ -1721,7 +1721,7 @@ func (r *Runtime) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.
restartPod := false restartPod := false
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
expectedHash := kubecontainer.HashContainer(&container) expectedHash := kubecontainer.HashContainerLegacy(&container)
c := runningPod.FindContainerByName(container.Name) c := runningPod.FindContainerByName(container.Name)
if c == nil { if c == nil {