Merge remote-tracking branch 'upstream/master'

This commit is contained in:
Gao Zheng 2015-04-30 15:42:14 +08:00
commit b445afdf11
6 changed files with 3272 additions and 2769 deletions

View File

@ -20,7 +20,6 @@ import (
"io"
"os"
"runtime"
"strings"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
@ -33,39 +32,51 @@ import (
)
var (
outputDest = flag.StringP("output", "o", "-", "Output destination; '-' means stdout")
versions = flag.StringP("versions", "v", "v1beta3", "Comma separated list of versions for conversion.")
functionDest = flag.StringP("funcDest", "f", "-", "Output for conversion functions; '-' means stdout")
namesDest = flag.StringP("nameDest", "n", "-", "Output for function names; '-' means stdout")
version = flag.StringP("version", "v", "v1beta3", "Version for conversion.")
)
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
flag.Parse()
var out io.Writer
if *outputDest == "-" {
out = os.Stdout
var funcOut io.Writer
if *functionDest == "-" {
funcOut = os.Stdout
} else {
file, err := os.Create(*outputDest)
file, err := os.Create(*functionDest)
if err != nil {
glog.Fatalf("Couldn't open %v: %v", *outputDest, err)
glog.Fatalf("Couldn't open %v: %v", *functionDest, err)
}
defer file.Close()
out = file
funcOut = file
}
var nameOut io.Writer
if *namesDest == "-" {
nameOut = os.Stdout
} else {
file, err := os.Create(*namesDest)
if err != nil {
glog.Fatalf("Couldn't open %v: %v", *functionDest, err)
}
defer file.Close()
nameOut = file
}
versionsForConversion := strings.Split(*versions, ",")
for _, version := range versionsForConversion {
generator := conversion.NewGenerator(api.Scheme.Raw())
// TODO(wojtek-t): Change the overwrites to a flag.
generator.OverwritePackage(version, "")
generator.OverwritePackage("api", "newer")
for _, knownType := range api.Scheme.KnownTypes(version) {
if err := generator.GenerateConversionsForType(version, knownType); err != nil {
glog.Errorf("error while generating conversion functions for %v: %v", knownType, err)
}
}
if err := generator.WriteConversionFunctions(out); err != nil {
glog.Fatalf("Error while writing conversion functions: %v", err)
generator := conversion.NewGenerator(api.Scheme.Raw())
// TODO(wojtek-t): Change the overwrites to a flag.
generator.OverwritePackage(*version, "")
generator.OverwritePackage("api", "newer")
for _, knownType := range api.Scheme.KnownTypes(*version) {
if err := generator.GenerateConversionsForType(*version, knownType); err != nil {
glog.Errorf("error while generating conversion functions for %v: %v", knownType, err)
}
}
if err := generator.WriteConversionFunctions(funcOut); err != nil {
glog.Fatalf("Error while writing conversion functions: %v", err)
}
if err := generator.WriteConversionFunctionNames(nameOut); err != nil {
glog.Fatalf("Error while writing conversion functions: %v", err)
}
}

View File

@ -222,9 +222,24 @@ types, structural change in particular - you must add some logic to convert
versioned APIs to and from the internal representation. If you see errors from
the `serialization_test`, it may indicate the need for explicit conversions.
Performance of conversions very heavily influences the performance of the apiserver.
Thus, we are auto-generating conversion functions that are much more efficient
than the generic ones (which are based on reflection and thus are highly
inefficient).
The conversion code resides with each versioned API -
`pkg/api/<version>/conversion.go`. Unsurprisingly, this also requires you to
add tests to `pkg/api/<version>/conversion_test.go`.
`pkg/api/<version>/conversion.go`. To regenerate conversion functions:
- run
```
$ go run cmd/kube-conversion/conversion.go -v <version> -f <file1.txt> -n <file2.txt>
```
- replace all conversion functions (the convert\* functions) in
`pkg/api/<version>/conversion.go` with the contents of \<file1.txt\>
- replace arguments of `newer.Scheme.AddGeneratedConversionFuncs`
with the contents of \<file2.txt\>
Unsurprisingly, this also requires you to add tests to
`pkg/api/<version>/conversion_test.go`.
## Update the fuzzer

File diff suppressed because it is too large Load Diff

View File

@ -27,6 +27,7 @@ import (
// Generator generates Go source for functions that convert API objects
// between a versioned API and the internal representation.
type Generator interface {
	// GenerateConversionsForType records the conversions needed for the
	// given type of the named version; errors are reported per type.
	GenerateConversionsForType(version string, reflection reflect.Type) error
	// WriteConversionFunctions writes the generated conversion function
	// bodies to w.
	WriteConversionFunctions(w io.Writer) error
	// WriteConversionFunctionNames writes the names of the generated
	// conversion functions to w, one per line.
	WriteConversionFunctionNames(w io.Writer) error
	// OverwritePackage sets the package name to emit for types from pkg
	// (e.g. rendering the internal "api" package as "newer").
	OverwritePackage(pkg, overwrite string)
}
@ -177,7 +178,7 @@ func (g *generator) WriteConversionFunctions(w io.Writer) error {
}
sort.Sort(byName(keys))
indent := 2
indent := 0
for _, inType := range keys {
outType := g.convertibles[inType]
// All types in g.convertibles are structs.
@ -194,12 +195,32 @@ func (g *generator) WriteConversionFunctions(w io.Writer) error {
return nil
}
// WriteConversionFunctionNames writes the names of all generated conversion
// functions to w in alphabetical order, one per line with a trailing comma,
// so the output can be pasted into an argument list (e.g. the call to
// AddGeneratedConversionFuncs).
func (g *generator) WriteConversionFunctionNames(w io.Writer) error {
	funcNames := make([]string, 0, 2*len(g.convertibles))
	for in, out := range g.convertibles {
		// Conversions exist in both directions for every convertible pair.
		funcNames = append(funcNames,
			conversionFunctionName(in, out),
			conversionFunctionName(out, in))
	}
	sort.Strings(funcNames)
	const indent = 2
	for _, fn := range funcNames {
		if err := writeLine(w, indent, fn+",\n"); err != nil {
			return err
		}
	}
	return nil
}
func (g *generator) typeName(inType reflect.Type) string {
switch inType.Kind() {
case reflect.Map:
return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem()))
case reflect.Slice:
return fmt.Sprintf("[]%s", g.typeName(inType.Elem()))
case reflect.Ptr:
return fmt.Sprintf("*%s", g.typeName(inType.Elem()))
default:
typeWithPkg := fmt.Sprintf("%s", inType)
slices := strings.Split(typeWithPkg, ".")
@ -221,6 +242,22 @@ func (g *generator) typeName(inType reflect.Type) string {
}
}
func packageForName(inType reflect.Type) string {
if inType.PkgPath() == "" {
return ""
}
slices := strings.Split(inType.PkgPath(), "/")
return slices[len(slices)-1]
}
// conversionFunctionName builds the name of the generated function that
// converts inType to outType, in the form
// convert_<inPkg>_<InType>_To_<outPkg>_<OutType>.
func conversionFunctionName(inType, outType reflect.Type) string {
	return fmt.Sprintf("convert_%s_%s_To_%s_%s",
		packageForName(inType), inType.Name(),
		packageForName(outType), outType.Name())
}
func indentLine(w io.Writer, indent int) error {
indentation := ""
for i := 0; i < indent; i++ {
@ -240,9 +277,9 @@ func writeLine(w io.Writer, indent int, line string) error {
return nil
}
func writeHeader(w io.Writer, inType, outType string, indent int) error {
format := "func(in *%s, out *%s, s conversion.Scope) error {\n"
stmt := fmt.Sprintf(format, inType, outType)
func writeHeader(w io.Writer, name, inType, outType string, indent int) error {
format := "func %s(in *%s, out *%s, s conversion.Scope) error {\n"
stmt := fmt.Sprintf(format, name, inType, outType)
return writeLine(w, indent, stmt)
}
@ -250,7 +287,7 @@ func writeFooter(w io.Writer, indent int) error {
if err := writeLine(w, indent+1, "return nil\n"); err != nil {
return err
}
if err := writeLine(w, indent, "},\n"); err != nil {
if err := writeLine(w, indent, "}\n"); err != nil {
return err
}
return nil
@ -336,13 +373,29 @@ func (g *generator) writeConversionForSlice(w io.Writer, inField, outField refle
if err := writeLine(w, indent+1, forStmt); err != nil {
return err
}
if inField.Type.Elem().AssignableTo(outField.Type.Elem()) {
assignFormat := "out.%s[i] = in.%s[i]\n"
assignStmt := fmt.Sprintf(assignFormat, outField.Name, inField.Name)
if err := writeLine(w, indent+2, assignStmt); err != nil {
return err
assigned := false
switch inField.Type.Elem().Kind() {
case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct:
// Don't copy these via assignment/conversion!
default:
// This should handle all simple types.
if inField.Type.Elem().AssignableTo(outField.Type.Elem()) {
assignFormat := "out.%s[i] = in.%s[i]\n"
assignStmt := fmt.Sprintf(assignFormat, outField.Name, inField.Name)
if err := writeLine(w, indent+2, assignStmt); err != nil {
return err
}
assigned = true
} else if inField.Type.Elem().ConvertibleTo(outField.Type.Elem()) {
assignFormat := "out.%s[i] = %s(in.%s[i])\n"
assignStmt := fmt.Sprintf(assignFormat, outField.Name, g.typeName(outField.Type.Elem()), inField.Name)
if err := writeLine(w, indent+2, assignStmt); err != nil {
return err
}
assigned = true
}
} else {
}
if !assigned {
assignFormat := "if err := s.Convert(&in.%s[i], &out.%s[i], 0); err != nil {\n"
assignStmt := fmt.Sprintf(assignFormat, inField.Name, outField.Name)
if err := writeLine(w, indent+2, assignStmt); err != nil {
@ -471,10 +524,6 @@ func (g *generator) writeConversionForStruct(w io.Writer, inType, outType reflec
continue
}
// TODO(wojtek-t): At some point we may consider named functions and call
// appropriate function here instead of using generic Convert method that
// will call the same method underneath after some additional operations.
assignFormat := "if err := s.Convert(&in.%s, &out.%s, 0); err != nil {\n"
assignStmt := fmt.Sprintf(assignFormat, inField.Name, outField.Name)
if err := writeLine(w, indent, assignStmt); err != nil {
@ -491,7 +540,8 @@ func (g *generator) writeConversionForStruct(w io.Writer, inType, outType reflec
}
func (g *generator) writeConversionForType(w io.Writer, inType, outType reflect.Type, indent int) error {
if err := writeHeader(w, g.typeName(inType), g.typeName(outType), indent); err != nil {
funcName := conversionFunctionName(inType, outType)
if err := writeHeader(w, funcName, g.typeName(inType), g.typeName(outType), indent); err != nil {
return err
}
switch inType.Kind() {
@ -505,6 +555,9 @@ func (g *generator) writeConversionForType(w io.Writer, inType, outType reflect.
if err := writeFooter(w, indent); err != nil {
return err
}
if err := writeLine(w, 0, "\n"); err != nil {
return err
}
return nil
}

View File

@ -26,6 +26,7 @@ import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
apierrors "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
@ -155,44 +156,59 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
}
}
if len(image) != 0 {
var err error
// load the old RC into the "new" RC
if newRc, err = client.ReplicationControllers(cmdNamespace).Get(oldName); err != nil {
return err
}
if len(newRc.Spec.Template.Spec.Containers) > 1 {
// TODO: support multi-container image update.
return errors.New("Image update is not supported for multi-container pods")
}
if len(newRc.Spec.Template.Spec.Containers) == 0 {
return cmdutil.UsageError(cmd, "Pod has no containers! (%v)", newRc)
}
newRc.Spec.Template.Spec.Containers[0].Image = image
newHash, err := hashObject(newRc, client.Codec)
if err != nil {
return err
}
var newName string
var err error
if len(args) >= 2 {
newName = args[1]
} else {
keepOldName = true
newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
newName, _ = kubectl.GetNextControllerAnnotation(oldRc)
}
newRc.Name = newName
newRc.Spec.Selector[deploymentKey] = newHash
newRc.Spec.Template.Labels[deploymentKey] = newHash
// Clear resource version after hashing so that identical updates get different hashes.
newRc.ResourceVersion = ""
if _, found := oldRc.Spec.Selector[deploymentKey]; !found {
if err := addDeploymentKeyToReplicationController(oldRc, client, deploymentKey, cmdNamespace, out); err != nil {
if len(newName) > 0 {
newRc, err = client.ReplicationControllers(cmdNamespace).Get(newName)
if err != nil && !apierrors.IsNotFound(err) {
return err
}
fmt.Fprint(out, "Found existing update in progress (%s), resuming.\n", newName)
}
if newRc == nil {
// load the old RC into the "new" RC
if newRc, err = client.ReplicationControllers(cmdNamespace).Get(oldName); err != nil {
return err
}
if len(newRc.Spec.Template.Spec.Containers) > 1 {
// TODO: support multi-container image update.
return errors.New("Image update is not supported for multi-container pods")
}
if len(newRc.Spec.Template.Spec.Containers) == 0 {
return cmdutil.UsageError(cmd, "Pod has no containers! (%v)", newRc)
}
newRc.Spec.Template.Spec.Containers[0].Image = image
newHash, err := hashObject(newRc, client.Codec)
if err != nil {
return err
}
if len(newName) == 0 {
keepOldName = true
newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
}
newRc.Name = newName
newRc.Spec.Selector[deploymentKey] = newHash
newRc.Spec.Template.Labels[deploymentKey] = newHash
// Clear resource version after hashing so that identical updates get different hashes.
newRc.ResourceVersion = ""
kubectl.SetNextControllerAnnotation(oldRc, newName)
if _, found := oldRc.Spec.Selector[deploymentKey]; !found {
if err := addDeploymentKeyToReplicationController(oldRc, client, deploymentKey, cmdNamespace, out); err != nil {
return err
}
}
}
}
newName := newRc.Name

View File

@ -82,8 +82,21 @@ func NewRollingUpdater(namespace string, c RollingUpdaterClient) *RollingUpdater
const (
sourceIdAnnotation = kubectlAnnotationPrefix + "update-source-id"
desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas"
nextControllerAnnotation = kubectlAnnotationPrefix + "next-controller-id"
)
// GetNextControllerAnnotation returns the name of the next replication
// controller recorded on rc under the kubectl "next-controller-id"
// annotation, and whether that annotation was present.
func GetNextControllerAnnotation(rc *api.ReplicationController) (string, bool) {
	res, found := rc.Annotations[nextControllerAnnotation]
	return res, found
}
// SetNextControllerAnnotation records name on rc under the kubectl
// "next-controller-id" annotation, initializing the annotations map if it
// is nil (reading a nil map is safe, but writing to one would panic).
func SetNextControllerAnnotation(rc *api.ReplicationController, name string) {
	if rc.Annotations == nil {
		rc.Annotations = map[string]string{}
	}
	rc.Annotations[nextControllerAnnotation] = name
}
// Update all pods for a ReplicationController (oldRc) by creating a new
// controller (newRc) with 0 replicas, and synchronously resizing oldRc,newRc
// by 1 until oldRc has 0 replicas and newRc has the original # of desired