From 732647ea97033601a962455ae4fa2b883bc6a713 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Wed, 17 Jun 2015 15:48:27 -0400 Subject: [PATCH 1/3] Improve conversion to support multiple packages OpenShift uses multiple API packages (types are split) which Kube will also eventually have as we introduce more plugins. These changes make the generators able to handle importing different API object packages into a single generator function. --- cmd/genconversion/conversion.go | 13 +- cmd/gendeepcopy/deep_copy.go | 13 +- hack/update-generated-conversions.sh | 8 -- pkg/runtime/conversion_generator.go | 177 +++++++++++++++++++++---- pkg/runtime/deep_copy_generator.go | 188 +++++++++++++++++++++------ 5 files changed, 325 insertions(+), 74 deletions(-) diff --git a/cmd/genconversion/conversion.go b/cmd/genconversion/conversion.go index c25fb0d0aad..c60b8f1dacd 100644 --- a/cmd/genconversion/conversion.go +++ b/cmd/genconversion/conversion.go @@ -17,13 +17,16 @@ limitations under the License. package main import ( + "fmt" "io" "os" + "path" "runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" _ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1" pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" flag "github.com/spf13/pflag" @@ -50,7 +53,9 @@ func main() { funcOut = file } - generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw()) + generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", *version)) + apiShort := generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api") + generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource") // TODO(wojtek-t): Change the overwrites to a flag. 
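	// Annotation on the calls above and below: AddImport registers a package
	// with the generator and returns the short alias the generated file will
	// use for it; apiShort is captured so the RegisterConversionFunctions call
	// further down can reference api.Scheme through whatever alias was actually
	// assigned. OverwritePackage(*version, "") maps the version being generated
	// to an empty qualifier, so its own types are emitted unqualified.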
generator.OverwritePackage(*version, "") for _, knownType := range api.Scheme.KnownTypes(*version) { @@ -58,10 +63,14 @@ func main() { glog.Errorf("error while generating conversion functions for %v: %v", knownType, err) } } + generator.RepackImports(util.NewStringSet()) + if err := generator.WriteImports(funcOut); err != nil { + glog.Fatalf("error while writing imports: %v", err) + } if err := generator.WriteConversionFunctions(funcOut); err != nil { glog.Fatalf("Error while writing conversion functions: %v", err) } - if err := generator.RegisterConversionFunctions(funcOut); err != nil { + if err := generator.RegisterConversionFunctions(funcOut, fmt.Sprintf("%s.Scheme", apiShort)); err != nil { glog.Fatalf("Error while writing conversion functions: %v", err) } } diff --git a/cmd/gendeepcopy/deep_copy.go b/cmd/gendeepcopy/deep_copy.go index 59ac43bb72f..dcb1c33766d 100644 --- a/cmd/gendeepcopy/deep_copy.go +++ b/cmd/gendeepcopy/deep_copy.go @@ -19,12 +19,14 @@ package main import ( "io" "os" + "path" "runtime" "strings" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" _ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1" pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" flag "github.com/spf13/pflag" @@ -53,10 +55,14 @@ func main() { } knownVersion := *version + registerTo := "api.Scheme" if knownVersion == "api" { knownVersion = api.Scheme.Raw().InternalVersion + registerTo = "Scheme" } - generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw()) + pkgPath := path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", knownVersion) + generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), pkgPath, util.NewStringSet("github.com/GoogleCloudPlatform/kubernetes")) + generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api") for _, overwrite := range strings.Split(*overwrites, ",") { vals := strings.Split(overwrite, "=") @@ -67,13 +73,14 @@ func main() { glog.Errorf("error while generating deep copy functions for %v: %v", knownType, err) } } - if err := generator.WriteImports(funcOut, *version); err != nil { + generator.RepackImports() + if err := generator.WriteImports(funcOut); err != nil { glog.Fatalf("error while writing imports: %v", err) } if err := generator.WriteDeepCopyFunctions(funcOut); err != nil { glog.Fatalf("error while writing deep copy functions: %v", err) } - if err := generator.RegisterDeepCopyFunctions(funcOut, *version); err != nil { + if err := generator.RegisterDeepCopyFunctions(funcOut, registerTo); err != nil { glog.Fatalf("error while registering deep copy functions: %v", err) } } diff --git a/hack/update-generated-conversions.sh b/hack/update-generated-conversions.sh index d7b9f9fd591..7df31ca18f8 100755 --- a/hack/update-generated-conversions.sh +++ b/hack/update-generated-conversions.sh @@ -33,14 +33,6 @@ function generate_version() { cat >> $TMPFILE < 0 { + name = dirname + name + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 { + name = subdirname + name + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + } + } + for i := 2; i < 100; i++ { + generatedName := fmt.Sprintf("%s%d", name, i) + if _, ok := g.shortImports[generatedName]; !ok { + g.imports[pkg] = generatedName + g.shortImports[generatedName] = pkg + return generatedName + } + 
} + panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports)) +} + func (g *conversionGenerator) typeName(inType reflect.Type) string { switch inType.Kind() { - case reflect.Map: - return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) case reflect.Slice: return fmt.Sprintf("[]%s", g.typeName(inType.Elem())) case reflect.Ptr: return fmt.Sprintf("*%s", g.typeName(inType.Elem())) + case reflect.Map: + if len(inType.Name()) == 0 { + return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) + } + fallthrough default: - typeWithPkg := fmt.Sprintf("%s", inType) - slices := strings.Split(typeWithPkg, ".") - if len(slices) == 1 { + pkg, name := inType.PkgPath(), inType.Name() + if len(name) == 0 && inType.Kind() == reflect.Struct { + return "struct{}" + } + if len(pkg) == 0 { // Default package. - return slices[0] + return name } - if len(slices) == 2 { - pkg := slices[0] - if val, found := g.pkgOverwrites[pkg]; found { - pkg = val - } - if pkg != "" { - pkg = pkg + "." - } - return pkg + slices[1] + if val, found := g.pkgOverwrites[pkg]; found { + pkg = val } - panic("Incorrect type name: " + typeWithPkg) + if len(pkg) == 0 { + return name + } + short := g.addImportByPath(pkg) + if len(short) > 0 { + return fmt.Sprintf("%s.%s", short, name) + } + return name } } @@ -658,6 +785,10 @@ func (g *conversionGenerator) existsDedicatedConversionFunction(inType, outType // unnamed. Thus we return false here. return false } + // TODO: no way to handle private conversions in different packages + if g.assumePrivateConversions { + return false + } return g.scheme.Converter().HasConversionFunc(inType, outType) } diff --git a/pkg/runtime/deep_copy_generator.go b/pkg/runtime/deep_copy_generator.go index 7be7af6bd86..20d931caa0a 100644 --- a/pkg/runtime/deep_copy_generator.go +++ b/pkg/runtime/deep_copy_generator.go @@ -19,6 +19,7 @@ package runtime import ( "fmt" "io" + "path" "reflect" "sort" "strings" @@ -38,9 +39,20 @@ type DeepCopyGenerator interface { // functions for this type and all nested types will be generated. AddType(inType reflect.Type) error + // ReplaceType registers a type that should be used instead of the type + // with the provided pkgPath and name. + ReplaceType(pkgPath, name string, in interface{}) + + // AddImport registers a package name with the generator and returns its + // short name. + AddImport(pkgPath string) string + + // RepackImports creates a stable ordering of import short names + RepackImports() + // Writes all imports that are necessary for deep-copy function and // their registration. - WriteImports(w io.Writer, pkg string) error + WriteImports(w io.Writer) error // Writes deel-copy functions for all types added via AddType() method // and their nested types. 
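// Standalone sketch (illustrative, not part of the patch series) of how
// AddImport chooses a unique short alias; it mirrors the addImportByPath
// helper that both generators now carry. For a path "a/b/c" the candidates
// are "c", then "bc", then "abc", then "c2".."c99". The OpenShift-style path
// in main is a hypothetical example of a second project sharing the alias
// space with the Kubernetes API packages.
package main

import (
	"fmt"
	"path"
)

type aliaser struct {
	imports      map[string]string // package path -> short alias
	shortImports map[string]string // short alias -> package path
}

func (a *aliaser) remember(pkg, name string) string {
	a.imports[pkg] = name
	a.shortImports[name] = pkg
	return name
}

func (a *aliaser) add(pkg string) string {
	if name, ok := a.imports[pkg]; ok {
		return name
	}
	name := path.Base(pkg)
	if _, taken := a.shortImports[name]; !taken {
		return a.remember(pkg, name)
	}
	if dirname := path.Base(path.Dir(pkg)); len(dirname) > 0 {
		name = dirname + name
		if _, taken := a.shortImports[name]; !taken {
			return a.remember(pkg, name)
		}
		if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 {
			name = subdirname + name
			if _, taken := a.shortImports[name]; !taken {
				return a.remember(pkg, name)
			}
		}
	}
	for i := 2; i < 100; i++ {
		generated := fmt.Sprintf("%s%d", name, i)
		if _, taken := a.shortImports[generated]; !taken {
			return a.remember(pkg, generated)
		}
	}
	panic(fmt.Sprintf("unable to find a unique name for the package path %q", pkg))
}

func main() {
	a := &aliaser{imports: map[string]string{}, shortImports: map[string]string{}}
	fmt.Println(a.add("github.com/GoogleCloudPlatform/kubernetes/pkg/api"))          // "api"
	fmt.Println(a.add("github.com/openshift/origin/pkg/image/api"))                  // "imageapi" (base name collides)
	fmt.Println(a.add("github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource")) // "resource"
}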
@@ -57,20 +69,80 @@ type DeepCopyGenerator interface { OverwritePackage(pkg, overwrite string) } -func NewDeepCopyGenerator(scheme *conversion.Scheme) DeepCopyGenerator { - return &deepCopyGenerator{ +func NewDeepCopyGenerator(scheme *conversion.Scheme, targetPkg string, include util.StringSet) DeepCopyGenerator { + g := &deepCopyGenerator{ scheme: scheme, + targetPkg: targetPkg, copyables: make(map[reflect.Type]bool), - imports: util.StringSet{}, + imports: make(map[string]string), + shortImports: make(map[string]string), pkgOverwrites: make(map[string]string), + replace: make(map[pkgPathNamePair]reflect.Type), + include: include, } + g.targetPackage(targetPkg) + g.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/conversion") + return g +} + +type pkgPathNamePair struct { + PkgPath string + Name string } type deepCopyGenerator struct { - scheme *conversion.Scheme - copyables map[reflect.Type]bool - imports util.StringSet + scheme *conversion.Scheme + targetPkg string + copyables map[reflect.Type]bool + // map of package names to shortname + imports map[string]string + // map of short names to package names + shortImports map[string]string pkgOverwrites map[string]string + replace map[pkgPathNamePair]reflect.Type + include util.StringSet +} + +func (g *deepCopyGenerator) addImportByPath(pkg string) string { + if name, ok := g.imports[pkg]; ok { + return name + } + name := path.Base(pkg) + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + if dirname := path.Base(path.Dir(pkg)); len(dirname) > 0 { + name = dirname + name + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 { + name = subdirname + name + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + } + } + for i := 2; i < 100; i++ { + generatedName := fmt.Sprintf("%s%d", name, i) + if _, ok := g.shortImports[generatedName]; !ok { + g.imports[pkg] = generatedName + g.shortImports[generatedName] = pkg + return generatedName + } + } + panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports)) +} + +func (g *deepCopyGenerator) targetPackage(pkg string) { + g.imports[pkg] = "" + g.shortImports[""] = pkg } func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { @@ -90,11 +162,18 @@ func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { return err } case reflect.Interface: - g.imports.Insert(inType.PkgPath()) + g.addImportByPath(inType.PkgPath()) return nil case reflect.Struct: - g.imports.Insert(inType.PkgPath()) - if !strings.HasPrefix(inType.PkgPath(), "github.com/GoogleCloudPlatform/kubernetes") { + g.addImportByPath(inType.PkgPath()) + found := false + for s := range g.include { + if strings.HasPrefix(inType.PkgPath(), s) { + found = true + break + } + } + if !found { return nil } for i := 0; i < inType.NumField(); i++ { @@ -110,6 +189,15 @@ func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { return nil } +func (g *deepCopyGenerator) AddImport(pkg string) string { + return g.addImportByPath(pkg) +} + +// ReplaceType registers a replacement type to be used instead of the named type +func (g *deepCopyGenerator) ReplaceType(pkgPath, name string, t interface{}) { + g.replace[pkgPathNamePair{pkgPath, name}] = reflect.TypeOf(t) +} + func (g *deepCopyGenerator) 
AddType(inType reflect.Type) error { if inType.Kind() != reflect.Struct { return fmt.Errorf("non-struct copies are not supported") @@ -117,10 +205,23 @@ func (g *deepCopyGenerator) AddType(inType reflect.Type) error { return g.addAllRecursiveTypes(inType) } -func (g *deepCopyGenerator) WriteImports(w io.Writer, pkg string) error { +func (g *deepCopyGenerator) RepackImports() { + var packages []string + for key := range g.imports { + packages = append(packages, key) + } + sort.Strings(packages) + g.imports = make(map[string]string) + g.shortImports = make(map[string]string) + + g.targetPackage(g.targetPkg) + for _, pkg := range packages { + g.addImportByPath(pkg) + } +} + +func (g *deepCopyGenerator) WriteImports(w io.Writer) error { var packages []string - packages = append(packages, "github.com/GoogleCloudPlatform/kubernetes/pkg/api") - packages = append(packages, "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion") for key := range g.imports { packages = append(packages, key) } @@ -130,10 +231,13 @@ func (g *deepCopyGenerator) WriteImports(w io.Writer, pkg string) error { indent := 0 buffer.addLine("import (\n", indent) for _, importPkg := range packages { - if strings.HasSuffix(importPkg, pkg) { + if len(importPkg) == 0 { continue } - buffer.addLine(fmt.Sprintf("\"%s\"\n", importPkg), indent+1) + if len(g.imports[importPkg]) == 0 { + continue + } + buffer.addLine(fmt.Sprintf("%s \"%s\"\n", g.imports[importPkg], importPkg), indent+1) } buffer.addLine(")\n", indent) buffer.addLine("\n", indent) @@ -159,35 +263,47 @@ func (s byPkgAndName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (g *deepCopyGenerator) typeName(inType reflect.Type) string { +func (g *deepCopyGenerator) nameForType(inType reflect.Type) string { switch inType.Kind() { - case reflect.Map: - return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) case reflect.Slice: return fmt.Sprintf("[]%s", g.typeName(inType.Elem())) case reflect.Ptr: return fmt.Sprintf("*%s", g.typeName(inType.Elem())) + case reflect.Map: + if len(inType.Name()) == 0 { + return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) + } + fallthrough default: - typeWithPkg := fmt.Sprintf("%s", inType) - slices := strings.Split(typeWithPkg, ".") - if len(slices) == 1 { + pkg, name := inType.PkgPath(), inType.Name() + if len(name) == 0 && inType.Kind() == reflect.Struct { + return "struct{}" + } + if len(pkg) == 0 { // Default package. - return slices[0] + return name } - if len(slices) == 2 { - pkg := slices[0] - if val, found := g.pkgOverwrites[pkg]; found { - pkg = val - } - if pkg != "" { - pkg = pkg + "." 
- } - return pkg + slices[1] + if val, found := g.pkgOverwrites[pkg]; found { + pkg = val } - panic("Incorrect type name: " + typeWithPkg) + if len(pkg) == 0 { + return name + } + short := g.addImportByPath(pkg) + if len(short) > 0 { + return fmt.Sprintf("%s.%s", short, name) + } + return name } } +func (g *deepCopyGenerator) typeName(inType reflect.Type) string { + if t, ok := g.replace[pkgPathNamePair{inType.PkgPath(), inType.Name()}]; ok { + return g.nameForType(t) + } + return g.nameForType(inType) +} + func (g *deepCopyGenerator) deepCopyFunctionName(inType reflect.Type) string { funcNameFormat := "deepCopy_%s_%s" inPkg := packageForName(inType) @@ -442,12 +558,8 @@ func (g *deepCopyGenerator) writeDeepCopyForType(b *buffer, inType reflect.Type, func (g *deepCopyGenerator) writeRegisterHeader(b *buffer, pkg string, indent int) { b.addLine("func init() {\n", indent) - registerFormat := "err := %sScheme.AddGeneratedDeepCopyFuncs(\n" - if pkg == "api" { - b.addLine(fmt.Sprintf(registerFormat, ""), indent+1) - } else { - b.addLine(fmt.Sprintf(registerFormat, "api."), indent+1) - } + registerFormat := "err := %s.AddGeneratedDeepCopyFuncs(\n" + b.addLine(fmt.Sprintf(registerFormat, pkg), indent+1) } func (g *deepCopyGenerator) writeRegisterFooter(b *buffer, indent int) { From 487fe2d1162e83ee2d10c36f824d1ff3e778dbb3 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Wed, 29 Jul 2015 17:06:55 -0400 Subject: [PATCH 2/3] Update deep copies --- pkg/api/deep_copy_generated.go | 38 +++++++++++++++---------------- pkg/api/v1/deep_copy_generated.go | 36 ++++++++++++++--------------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go index 7e9c27b0581..c4e083c068e 100644 --- a/pkg/api/deep_copy_generated.go +++ b/pkg/api/deep_copy_generated.go @@ -18,14 +18,14 @@ package api // AUTO-GENERATED FUNCTIONS START HERE import ( - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" - "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" - "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" - "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" - "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/util" - "speter.net/go/exp/math/dec/inf" - "time" + resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" + conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" + fields "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" + labels "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" + runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + util "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + inf "speter.net/go/exp/math/dec/inf" + time "time" ) func deepCopy_api_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error { @@ -587,7 +587,7 @@ func deepCopy_api_LimitRange(in LimitRange, out *LimitRange, c *conversion.Clone func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error { out.Type = in.Type if in.Max != nil { - out.Max = make(map[ResourceName]resource.Quantity) + out.Max = make(ResourceList) for key, val := range in.Max { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -599,7 +599,7 @@ func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv out.Max = nil } if in.Min != nil { - out.Min = 
make(map[ResourceName]resource.Quantity) + out.Min = make(ResourceList) for key, val := range in.Min { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -611,7 +611,7 @@ func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv out.Min = nil } if in.Default != nil { - out.Default = make(map[ResourceName]resource.Quantity) + out.Default = make(ResourceList) for key, val := range in.Default { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -857,7 +857,7 @@ func deepCopy_api_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) err func deepCopy_api_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error { if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1041,7 +1041,7 @@ func deepCopy_api_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, ou out.AccessModes = nil } if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1143,7 +1143,7 @@ func deepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist func deepCopy_api_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error { if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1571,7 +1571,7 @@ func deepCopy_api_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList func deepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error { if in.Hard != nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1587,7 +1587,7 @@ func deepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error { if in.Hard != nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1599,7 +1599,7 @@ func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuota out.Hard = nil } if in.Used != nil { - out.Used = make(map[ResourceName]resource.Quantity) + out.Used = make(ResourceList) for key, val := range in.Used { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1615,7 +1615,7 @@ func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuota func deepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error { if in.Limits != nil { - out.Limits = make(map[ResourceName]resource.Quantity) + out.Limits = make(ResourceList) for key, val := range in.Limits { newVal := new(resource.Quantity) if err 
:= deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1627,7 +1627,7 @@ func deepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceReq out.Limits = nil } if in.Requests != nil { - out.Requests = make(map[ResourceName]resource.Quantity) + out.Requests = make(ResourceList) for key, val := range in.Requests { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go index b61b2cdce3c..91c9b8d3f68 100644 --- a/pkg/api/v1/deep_copy_generated.go +++ b/pkg/api/v1/deep_copy_generated.go @@ -18,13 +18,13 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE import ( - "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" - "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" - "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/util" - "speter.net/go/exp/math/dec/inf" - "time" + api "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" + conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" + runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + util "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + inf "speter.net/go/exp/math/dec/inf" + time "time" ) func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error { @@ -600,7 +600,7 @@ func deepCopy_v1_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error { out.Type = in.Type if in.Max != nil { - out.Max = make(map[ResourceName]resource.Quantity) + out.Max = make(ResourceList) for key, val := range in.Max { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -612,7 +612,7 @@ func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conve out.Max = nil } if in.Min != nil { - out.Min = make(map[ResourceName]resource.Quantity) + out.Min = make(ResourceList) for key, val := range in.Min { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -624,7 +624,7 @@ func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conve out.Min = nil } if in.Default != nil { - out.Default = make(map[ResourceName]resource.Quantity) + out.Default = make(ResourceList) for key, val := range in.Default { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -860,7 +860,7 @@ func deepCopy_v1_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) erro func deepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error { if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1044,7 +1044,7 @@ func deepCopy_v1_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out out.AccessModes = nil } if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1146,7 
+1146,7 @@ func deepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *Persiste func deepCopy_v1_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error { if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1580,7 +1580,7 @@ func deepCopy_v1_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList, func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error { if in.Hard != nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1596,7 +1596,7 @@ func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error { if in.Hard != nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1608,7 +1608,7 @@ func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaS out.Hard = nil } if in.Used != nil { - out.Used = make(map[ResourceName]resource.Quantity) + out.Used = make(ResourceList) for key, val := range in.Used { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1624,7 +1624,7 @@ func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaS func deepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error { if in.Limits != nil { - out.Limits = make(map[ResourceName]resource.Quantity) + out.Limits = make(ResourceList) for key, val := range in.Limits { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1636,7 +1636,7 @@ func deepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequ out.Limits = nil } if in.Requests != nil { - out.Requests = make(map[ResourceName]resource.Quantity) + out.Requests = make(ResourceList) for key, val := range in.Requests { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { From 1d41f5ac75011daf465a7f799401793bd6bede81 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Wed, 29 Jul 2015 17:07:14 -0400 Subject: [PATCH 3/3] Update generated conversions --- pkg/api/v1/conversion_generated.go | 55 +++++++++++++++--------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go index d98300aba21..52cdbe805f6 100644 --- a/pkg/api/v1/conversion_generated.go +++ b/pkg/api/v1/conversion_generated.go @@ -16,15 +16,14 @@ limitations under the License. 
package v1 +// AUTO-GENERATED FUNCTIONS START HERE import ( - "reflect" - - "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" - "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" + api "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" + conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" + reflect "reflect" ) -// AUTO-GENERATED FUNCTIONS START HERE func convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.AWSElasticBlockStoreVolumeSource))(in) @@ -692,7 +691,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out } out.Type = LimitType(in.Type) if in.Max != nil { - out.Max = make(map[ResourceName]resource.Quantity) + out.Max = make(ResourceList) for key, val := range in.Max { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -704,7 +703,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out out.Max = nil } if in.Min != nil { - out.Min = make(map[ResourceName]resource.Quantity) + out.Min = make(ResourceList) for key, val := range in.Min { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -716,7 +715,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out out.Min = nil } if in.Default != nil { - out.Default = make(map[ResourceName]resource.Quantity) + out.Default = make(ResourceList) for key, val := range in.Default { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1006,7 +1005,7 @@ func convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus defaulting.(func(*api.NodeStatus))(in) } if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1216,7 +1215,7 @@ func convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(i out.AccessModes = nil } if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1330,7 +1329,7 @@ func convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.Persist defaulting.(func(*api.PersistentVolumeSpec))(in) } if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1735,7 +1734,7 @@ func convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuota defaulting.(func(*api.ResourceQuotaSpec))(in) } if in.Hard != nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1754,7 +1753,7 @@ func convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQ defaulting.(func(*api.ResourceQuotaStatus))(in) } if in.Hard != 
nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1766,7 +1765,7 @@ func convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQ out.Hard = nil } if in.Used != nil { - out.Used = make(map[ResourceName]resource.Quantity) + out.Used = make(ResourceList) for key, val := range in.Used { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1785,7 +1784,7 @@ func convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.Resourc defaulting.(func(*api.ResourceRequirements))(in) } if in.Limits != nil { - out.Limits = make(map[ResourceName]resource.Quantity) + out.Limits = make(ResourceList) for key, val := range in.Limits { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1797,7 +1796,7 @@ func convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.Resourc out.Limits = nil } if in.Requests != nil { - out.Requests = make(map[ResourceName]resource.Quantity) + out.Requests = make(ResourceList) for key, val := range in.Requests { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -2942,7 +2941,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap } out.Type = api.LimitType(in.Type) if in.Max != nil { - out.Max = make(map[api.ResourceName]resource.Quantity) + out.Max = make(api.ResourceList) for key, val := range in.Max { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -2954,7 +2953,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap out.Max = nil } if in.Min != nil { - out.Min = make(map[api.ResourceName]resource.Quantity) + out.Min = make(api.ResourceList) for key, val := range in.Min { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -2966,7 +2965,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap out.Min = nil } if in.Default != nil { - out.Default = make(map[api.ResourceName]resource.Quantity) + out.Default = make(api.ResourceList) for key, val := range in.Default { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -3256,7 +3255,7 @@ func convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus defaulting.(func(*NodeStatus))(in) } if in.Capacity != nil { - out.Capacity = make(map[api.ResourceName]resource.Quantity) + out.Capacity = make(api.ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -3466,7 +3465,7 @@ func convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(i out.AccessModes = nil } if in.Capacity != nil { - out.Capacity = make(map[api.ResourceName]resource.Quantity) + out.Capacity = make(api.ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -3580,7 +3579,7 @@ func convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentV defaulting.(func(*PersistentVolumeSpec))(in) } if in.Capacity != nil { - out.Capacity = make(map[api.ResourceName]resource.Quantity) + out.Capacity = make(api.ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -3985,7 +3984,7 @@ func 
convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec defaulting.(func(*ResourceQuotaSpec))(in) } if in.Hard != nil { - out.Hard = make(map[api.ResourceName]resource.Quantity) + out.Hard = make(api.ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -4004,7 +4003,7 @@ func convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuota defaulting.(func(*ResourceQuotaStatus))(in) } if in.Hard != nil { - out.Hard = make(map[api.ResourceName]resource.Quantity) + out.Hard = make(api.ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -4016,7 +4015,7 @@ func convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuota out.Hard = nil } if in.Used != nil { - out.Used = make(map[api.ResourceName]resource.Quantity) + out.Used = make(api.ResourceList) for key, val := range in.Used { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -4035,7 +4034,7 @@ func convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceReq defaulting.(func(*ResourceRequirements))(in) } if in.Limits != nil { - out.Limits = make(map[api.ResourceName]resource.Quantity) + out.Limits = make(api.ResourceList) for key, val := range in.Limits { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -4047,7 +4046,7 @@ func convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceReq out.Limits = nil } if in.Requests != nil { - out.Requests = make(map[api.ResourceName]resource.Quantity) + out.Requests = make(api.ResourceList) for key, val := range in.Requests { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil {
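For context, a minimal sketch of how a second code base could drive the updated DeepCopyGenerator across its own packages, using only the interface introduced in patch 1. The "github.com/example/project" paths, the ExternalTime type, and writing the output to stdout are placeholders for illustration, not anything from this series; a real caller would loop over its scheme's known types the way cmd/gendeepcopy does.

package main

import (
	"os"
	"reflect"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"

	"github.com/golang/glog"
)

// ExternalTime stands in for a type the hypothetical project substitutes for
// one of its own via ReplaceType.
type ExternalTime struct{}

func main() {
	// Package that the generated file will belong to (placeholder path).
	target := "github.com/example/project/pkg/api/v1"
	generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), target,
		util.NewStringSet("github.com/GoogleCloudPlatform/kubernetes", "github.com/example/project"))

	// Types from pkg/api are qualified through whatever alias AddImport assigns.
	generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api")

	// Emit the target version's own types without a package qualifier.
	generator.OverwritePackage("v1", "")

	// Substitute a local type wherever the named type would otherwise appear.
	generator.ReplaceType("github.com/example/project/pkg/api", "Time", ExternalTime{})

	// Walk one well-known type; AddType recurses into nested types whose
	// packages match the include set passed to the constructor.
	if err := generator.AddType(reflect.TypeOf(api.Pod{})); err != nil {
		glog.Errorf("error while generating deep copy functions: %v", err)
	}

	generator.RepackImports()
	if err := generator.WriteImports(os.Stdout); err != nil {
		glog.Fatalf("error while writing imports: %v", err)
	}
	if err := generator.WriteDeepCopyFunctions(os.Stdout); err != nil {
		glog.Fatalf("error while writing deep copy functions: %v", err)
	}
	if err := generator.RegisterDeepCopyFunctions(os.Stdout, "api.Scheme"); err != nil {
		glog.Fatalf("error while registering deep copy functions: %v", err)
	}
}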