fix getHash()
@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"io"
 	"os"
 	"reflect"
 	"sort"
@@ -44,14 +45,18 @@ func (c ByCreated) Less(i, j int) bool { return c[j].Created < c[i].Created }
 func getHash(containerCfg *config.ContainerConfig) string {
 	hash := sha1.New()
-	hash.Write([]byte(containerCfg.Id))
-	hash.Write([]byte(containerCfg.Cmd))
+
+	io.WriteString(hash, fmt.Sprintln(containerCfg.Id))
+	io.WriteString(hash, fmt.Sprintln(containerCfg.Cmd))
+	io.WriteString(hash, fmt.Sprintln(containerCfg.MigrateVolumes))
+	io.WriteString(hash, fmt.Sprintln(containerCfg.ReloadConfig))
+	io.WriteString(hash, fmt.Sprintln(containerCfg.CreateOnly))
 
 	if containerCfg.Service != nil {
 		//Get values of Service through reflection
 		val := reflect.ValueOf(containerCfg.Service).Elem()
 
 		//Create slice to sort the keys in Service Config, which allow constant hash ordering
-		var serviceKeys []string
+		serviceKeys := []string{}
 
 		//Create a data structure of map of values keyed by a string
 		unsortedKeyValue := make(map[string]interface{})
@@ -69,14 +74,14 @@ func getHash(containerCfg *config.ContainerConfig) string {
 		sort.Strings(serviceKeys)
 
 		//Go through keys and write hash
-		for i := 0; i < len(serviceKeys); i++ {
-			serviceValue := unsortedKeyValue[serviceKeys[i]]
-			sliceKeys := []string{}
+		for _, serviceKey := range serviceKeys {
+			serviceValue := unsortedKeyValue[serviceKey]
+
+			io.WriteString(hash, fmt.Sprintf("\n %v: ", serviceKey))
 
 			switch s := serviceValue.(type) {
-			default:
-				hash.Write([]byte(fmt.Sprintf("%v", serviceValue)))
-			case *project.SliceorMap:
+			case project.SliceorMap:
+				sliceKeys := []string{}
 				for lkey := range s.MapParts() {
 					if lkey != "io.rancher.os.hash" {
 						sliceKeys = append(sliceKeys, lkey)
@@ -84,28 +89,37 @@ func getHash(containerCfg *config.ContainerConfig) string {
 				}
 				sort.Strings(sliceKeys)
 
-				for j := 0; j < len(sliceKeys); j++ {
-					hash.Write([]byte(fmt.Sprintf("%s=%v", sliceKeys[j], s.MapParts()[sliceKeys[j]])))
+				for _, sliceKey := range sliceKeys {
+					io.WriteString(hash, fmt.Sprintf("%s=%v, ", sliceKey, s.MapParts()[sliceKey]))
 				}
-			case *project.Stringorslice:
-				sliceKeys = s.Slice()
+			case project.Maporslice:
+				sliceKeys := s.Slice()
 				sort.Strings(sliceKeys)
 
-				for j := 0; j < len(sliceKeys); j++ {
-					hash.Write([]byte(fmt.Sprintf("%s", sliceKeys[j])))
+				for _, sliceKey := range sliceKeys {
+					io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
 				}
+			case project.Stringorslice:
+				sliceKeys := s.Slice()
+				sort.Strings(sliceKeys)
+
+				for _, sliceKey := range sliceKeys {
+					io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
+				}
 			case []string:
-				sliceKeys = s
+				sliceKeys := s
 				sort.Strings(sliceKeys)
 
-				for j := 0; j < len(sliceKeys); j++ {
-					hash.Write([]byte(fmt.Sprintf("%s", sliceKeys[j])))
+				for _, sliceKey := range sliceKeys {
+					io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
 				}
+			default:
+				io.WriteString(hash, fmt.Sprintf("%v", serviceValue))
 			}
 		}
 	}
 
-	return hex.EncodeToString(hash.Sum([]byte{}))
+	return hex.EncodeToString(hash.Sum(nil))
 }
 
 func StartAndWait(dockerHost string, containerCfg *config.ContainerConfig) error {
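
For context, here is a minimal standalone sketch of the pattern the commit moves to: collect keys, sort them, stream "key=value, " pairs into the SHA-1 hasher with io.WriteString, and hex-encode hash.Sum(nil). The stableHash helper and the plain map[string]string argument are illustrative assumptions only, not the rancher/os types (config.ContainerConfig, project.SliceorMap) used in the real getHash().

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"sort"
)

// stableHash sorts the keys first so the digest does not depend on Go's
// randomized map iteration order, then streams "key=value, " pairs into
// the hasher, mirroring the structure of the fixed getHash().
func stableHash(labels map[string]string) string {
	hash := sha1.New()

	keys := []string{}
	for k := range labels {
		if k != "io.rancher.os.hash" { // skip the hash label itself, as getHash() does
			keys = append(keys, k)
		}
	}
	sort.Strings(keys)

	for _, k := range keys {
		io.WriteString(hash, fmt.Sprintf("%s=%v, ", k, labels[k]))
	}

	// hash.Sum(nil) returns only the digest; hash.Sum([]byte{}) yields the
	// same bytes, but nil is the idiomatic form the commit switches to.
	return hex.EncodeToString(hash.Sum(nil))
}

func main() {
	a := map[string]string{"io.rancher.os.scope": "system", "io.rancher.os.after": "network"}
	b := map[string]string{"io.rancher.os.after": "network", "io.rancher.os.scope": "system"}
	// Same contents inserted in a different order: the digests match.
	fmt.Println(stableHash(a) == stableHash(b))
}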