mirror of https://github.com/rancher/os.git synced 2025-07-19 09:26:27 +00:00

fix getHash()

Ivan Mikushin 2015-05-17 20:39:31 +05:00
parent 2547db84e5
commit 59029a49a8
2 changed files with 43 additions and 25 deletions

View File

@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"io"
 	"os"
 	"reflect"
 	"sort"
@@ -44,14 +45,18 @@ func (c ByCreated) Less(i, j int) bool { return c[j].Created < c[i].Created }
 func getHash(containerCfg *config.ContainerConfig) string {
 	hash := sha1.New()
-	hash.Write([]byte(containerCfg.Id))
-	hash.Write([]byte(containerCfg.Cmd))
+	io.WriteString(hash, fmt.Sprintln(containerCfg.Id))
+	io.WriteString(hash, fmt.Sprintln(containerCfg.Cmd))
+	io.WriteString(hash, fmt.Sprintln(containerCfg.MigrateVolumes))
+	io.WriteString(hash, fmt.Sprintln(containerCfg.ReloadConfig))
+	io.WriteString(hash, fmt.Sprintln(containerCfg.CreateOnly))
 	if containerCfg.Service != nil {
 		//Get values of Service through reflection
 		val := reflect.ValueOf(containerCfg.Service).Elem()
 		//Create slice to sort the keys in Service Config, which allow constant hash ordering
-		var serviceKeys []string
+		serviceKeys := []string{}
 		//Create a data structure of map of values keyed by a string
 		unsortedKeyValue := make(map[string]interface{})
@@ -69,14 +74,14 @@ func getHash(containerCfg *config.ContainerConfig) string {
 		sort.Strings(serviceKeys)
 		//Go through keys and write hash
-		for i := 0; i < len(serviceKeys); i++ {
-			serviceValue := unsortedKeyValue[serviceKeys[i]]
-			sliceKeys := []string{}
+		for _, serviceKey := range serviceKeys {
+			serviceValue := unsortedKeyValue[serviceKey]
+			io.WriteString(hash, fmt.Sprintf("\n %v: ", serviceKey))
 			switch s := serviceValue.(type) {
-			default:
-				hash.Write([]byte(fmt.Sprintf("%v", serviceValue)))
-			case *project.SliceorMap:
+			case project.SliceorMap:
+				sliceKeys := []string{}
 				for lkey := range s.MapParts() {
 					if lkey != "io.rancher.os.hash" {
 						sliceKeys = append(sliceKeys, lkey)
@@ -84,28 +89,37 @@ func getHash(containerCfg *config.ContainerConfig) string {
 				}
 				sort.Strings(sliceKeys)
-				for j := 0; j < len(sliceKeys); j++ {
-					hash.Write([]byte(fmt.Sprintf("%s=%v", sliceKeys[j], s.MapParts()[sliceKeys[j]])))
+				for _, sliceKey := range sliceKeys {
+					io.WriteString(hash, fmt.Sprintf("%s=%v, ", sliceKey, s.MapParts()[sliceKey]))
 				}
-			case *project.Stringorslice:
-				sliceKeys = s.Slice()
+			case project.Maporslice:
+				sliceKeys := s.Slice()
 				sort.Strings(sliceKeys)
-				for j := 0; j < len(sliceKeys); j++ {
-					hash.Write([]byte(fmt.Sprintf("%s", sliceKeys[j])))
+				for _, sliceKey := range sliceKeys {
+					io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
 				}
+			case project.Stringorslice:
+				sliceKeys := s.Slice()
+				sort.Strings(sliceKeys)
+				for _, sliceKey := range sliceKeys {
+					io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
+				}
 			case []string:
-				sliceKeys = s
+				sliceKeys := s
 				sort.Strings(sliceKeys)
-				for j := 0; j < len(sliceKeys); j++ {
-					hash.Write([]byte(fmt.Sprintf("%s", sliceKeys[j])))
+				for _, sliceKey := range sliceKeys {
+					io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
 				}
+			default:
+				io.WriteString(hash, fmt.Sprintf("%v", serviceValue))
 			}
 		}
 	}
-	return hex.EncodeToString(hash.Sum([]byte{}))
+	return hex.EncodeToString(hash.Sum(nil))
 }
 func StartAndWait(dockerHost string, containerCfg *config.ContainerConfig) error {

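The core of the change above is how map-valued service fields are fed into the hash: the new getHash matches the value types directly, collects the map keys, skips the stored io.rancher.os.hash label, sorts the keys, and streams each "key=value, " pair into the SHA-1 with io.WriteString, so the digest no longer depends on map iteration order. Below is a minimal standalone sketch of that pattern; hashLabels is a hypothetical helper for illustration only, not code from this commit.

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"sort"
)

// hashLabels is a hypothetical helper; the real getHash inlines this logic
// inside its type switch (the project.SliceorMap case).
func hashLabels(labels map[string]string) string {
	hash := sha1.New()
	keys := []string{}
	for k := range labels {
		if k != "io.rancher.os.hash" { // the previously stored hash is excluded from the digest
			keys = append(keys, k)
		}
	}
	// Sorting the keys gives a constant ordering, so the digest is the same on every run.
	sort.Strings(keys)
	for _, k := range keys {
		io.WriteString(hash, fmt.Sprintf("%s=%v, ", k, labels[k]))
	}
	return hex.EncodeToString(hash.Sum(nil))
}

func main() {
	fmt.Println(hashLabels(map[string]string{
		"io.rancher.os.scope":      "system",
		"io.rancher.os.createonly": "true",
	}))
}
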
View File

@@ -1,6 +1,7 @@
 package docker
 import (
+	"fmt"
 	"strings"
 	"testing"
@@ -9,7 +10,6 @@ import (
 	"github.com/stretchr/testify/require"
 	dockerClient "github.com/fsouza/go-dockerclient"
-	"github.com/Sirupsen/logrus"
 )
 func TestHash(t *testing.T) {
@@ -30,7 +30,7 @@ func TestHash(t *testing.T) {
 		Cmd: "1 2 3 4",
 	})
-	assert.Equal("510b68938cba936876588b0143093a5850d4a142", hash, "")
+	assert.Equal("d601444333c7fb4cb955bcca36c5ed59b6fa8c3f", hash, "")
 	assert.NotEqual(hash, hash2, "")
 	assert.NotEqual(hash2, hash3, "")
 	assert.NotEqual(hash, hash3, "")
@@ -45,15 +45,19 @@ func TestHash2(t *testing.T) {
 		MigrateVolumes: false,
 		ReloadConfig: false,
 		CreateOnly: true,
-		Service: &project.ServiceConfig{CapAdd:[]string(nil), CapDrop:[]string(nil), CpuShares:0, Command:"", Detach:"", Dns:project.NewStringorslice(), DnsSearch:project.NewStringorslice(), DomainName:"", Entrypoint:"", EnvFile:"", Environment:project.NewMaporslice([]string{}), Hostname:"", Image:"state", Labels:project.NewSliceorMap(map[string]string{"io.rancher.os.createonly":"true", "io.rancher.os.scope":"system"}), Links:[]string(nil), LogDriver:"json-file", MemLimit:0, Name:"", Net:"none", Pid:"", Ipc:"", Ports:[]string(nil), Privileged:true, Restart:"", ReadOnly:true, StdinOpen:false, Tty:false, User:"", Volumes:[]string{"/var/lib/docker:/var/lib/docker", "/var/lib/rancher/conf:/var/lib/rancher/conf", "/var/lib/system-docker:/var/lib/system-docker"}, VolumesFrom:[]string(nil), WorkingDir:"", Expose:[]string(nil), ExternalLinks:[]string(nil)},
+		Service: &project.ServiceConfig{CapAdd:nil, CapDrop:nil, CpuShares:0, Command:"", Detach:"", Dns:project.NewStringorslice(), DnsSearch:project.NewStringorslice(), DomainName:"", Entrypoint:"", EnvFile:"", Environment:project.NewMaporslice([]string{}), Hostname:"", Image:"state", Labels:project.NewSliceorMap(map[string]string{"io.rancher.os.createonly":"true", "io.rancher.os.scope":"system"}), Links:nil, LogDriver:"json-file", MemLimit:0, Name:"", Net:"none", Pid:"", Ipc:"", Ports:nil, Privileged:true, Restart:"", ReadOnly:true, StdinOpen:false, Tty:false, User:"", Volumes:[]string{"/var/lib/docker:/var/lib/docker", "/var/lib/rancher/conf:/var/lib/rancher/conf", "/var/lib/system-docker:/var/lib/system-docker"}, VolumesFrom:nil, WorkingDir:"", Expose:nil, ExternalLinks:nil},
 	}
-	for i := 0; i < 10000; i++ {
-		logrus.Infoln(i)
-		assert.Equal(getHash(cfg), getHash(cfg), "")
+	for i := 0; i < 1000; i++ {
+		assert.Equal(getHash(cfg), getHash(cfg), fmt.Sprintf("Failed at iteration: %v", i))
 	}
 }
+func TestBool2String(t *testing.T) {
+	assert := require.New(t)
+	assert.Equal("true", fmt.Sprint(true), "")
+}
 func TestParse(t *testing.T) {
 	assert := require.New(t)
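The added TestBool2String pins down the formatting assumption behind the new io.WriteString(hash, fmt.Sprintln(...)) calls for the boolean fields (MigrateVolumes, ReloadConfig, CreateOnly): fmt renders booleans as the fixed strings "true" and "false", so those writes are stable across runs. An illustrative snippet, not part of the commit:

package main

import "fmt"

func main() {
	// fmt formats booleans deterministically, which is what lets getHash
	// feed bool config fields into the hash via fmt.Sprintln.
	fmt.Printf("%q %q\n", fmt.Sprintln(true), fmt.Sprintln(false)) // "true\n" "false\n"
}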