run hack/update-staging-client-go.sh before the copy.sh changes

This commit is contained in:
Chao Xu 2016-12-20 17:21:00 -08:00
parent ef44fa1919
commit 3cd36e3604
6 changed files with 35 additions and 7 deletions

View File

@ -404,6 +404,12 @@ func IsForbidden(err error) bool {
return reasonForError(err) == metav1.StatusReasonForbidden return reasonForError(err) == metav1.StatusReasonForbidden
} }
// IsTimeout determines if err is an error which indicates that the request
// timed out due to long processing on the server.
func IsTimeout(err error) bool {
	reason := reasonForError(err)
	return reason == metav1.StatusReasonTimeout
}
// IsServerTimeout determines if err is an error which indicates that the request needs to be retried // IsServerTimeout determines if err is an error which indicates that the request needs to be retried
// by the client. // by the client.
func IsServerTimeout(err error) bool { func IsServerTimeout(err error) bool {

View File

@ -3249,6 +3249,9 @@ const (
// - Secret.Data["token"] - a token that identifies the service account to the API // - Secret.Data["token"] - a token that identifies the service account to the API
SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
// SecretTypeBootstrapToken is the key for tokens used by kubeadm to validate cluster info during discovery.
SecretTypeBootstrapToken = "bootstrap.kubernetes.io/token"
// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
ServiceAccountNameKey = "kubernetes.io/service-account.name" ServiceAccountNameKey = "kubernetes.io/service-account.name"
// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets

View File

@ -19,6 +19,7 @@ package kubeadm
import ( import (
"fmt" "fmt"
"os" "os"
"path"
"runtime" "runtime"
"strings" "strings"
) )
@ -46,9 +47,9 @@ func SetEnvParams() *EnvParams {
} }
return &EnvParams{ return &EnvParams{
KubernetesDir: envParams["kubernetes_dir"], KubernetesDir: path.Clean(envParams["kubernetes_dir"]),
HostPKIPath: envParams["host_pki_path"], HostPKIPath: path.Clean(envParams["host_pki_path"]),
HostEtcdPath: envParams["host_etcd_path"], HostEtcdPath: path.Clean(envParams["host_etcd_path"]),
HyperkubeImage: envParams["hyperkube_image"], HyperkubeImage: envParams["hyperkube_image"],
RepositoryPrefix: envParams["repo_prefix"], RepositoryPrefix: envParams["repo_prefix"],
DiscoveryImage: envParams["discovery_image"], DiscoveryImage: envParams["discovery_image"],

View File

@ -27,6 +27,15 @@ const ConfigMirrorAnnotationKey = "kubernetes.io/config.mirror"
const ConfigFirstSeenAnnotationKey = "kubernetes.io/config.seen" const ConfigFirstSeenAnnotationKey = "kubernetes.io/config.seen"
const ConfigHashAnnotationKey = "kubernetes.io/config.hash" const ConfigHashAnnotationKey = "kubernetes.io/config.hash"
// CriticalPodAnnotationKey marks a pod as critical. This key needs to sync
// with the key used by the rescheduler, which currently
// lives in contrib. Its presence indicates 2 things, as far as the kubelet is
// concerned:
// 1. Resource related admission checks will prioritize the admission of
// pods bearing the key, over pods without the key, regardless of QoS.
// 2. The OOM score of pods bearing the key will be <= pods without
// the key (where the <= part is determined by QoS).
const CriticalPodAnnotationKey = "scheduler.alpha.kubernetes.io/critical-pod"
// PodOperation defines what changes will be made on a pod configuration. // PodOperation defines what changes will be made on a pod configuration.
type PodOperation int type PodOperation int

View File

@ -194,7 +194,7 @@ func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error
// we could potentially look for '---' // we could potentially look for '---'
return false, true, nil return false, true, nil
} }
_, ok = utilyaml.GuessJSONStream(peek, 2048) _, _, ok = utilyaml.GuessJSONStream(peek, 2048)
return ok, false, nil return ok, false, nil
} }

View File

@ -181,6 +181,7 @@ type YAMLOrJSONDecoder struct {
bufferSize int bufferSize int
decoder decoder decoder decoder
rawData []byte
} }
// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents // NewYAMLOrJSONDecoder returns a decoder that will process YAML documents
@ -198,10 +199,11 @@ func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
// provide object, or returns an error. // provide object, or returns an error.
func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
if d.decoder == nil { if d.decoder == nil {
buffer, isJSON := GuessJSONStream(d.r, d.bufferSize) buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize)
if isJSON { if isJSON {
glog.V(4).Infof("decoding stream as JSON") glog.V(4).Infof("decoding stream as JSON")
d.decoder = json.NewDecoder(buffer) d.decoder = json.NewDecoder(buffer)
d.rawData = origData
} else { } else {
glog.V(4).Infof("decoding stream as YAML") glog.V(4).Infof("decoding stream as YAML")
d.decoder = NewYAMLToJSONDecoder(buffer) d.decoder = NewYAMLToJSONDecoder(buffer)
@ -215,6 +217,13 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
glog.V(4).Infof("reading stream failed: %v", readErr) glog.V(4).Infof("reading stream failed: %v", readErr)
} }
js := string(data) js := string(data)
// if contents from io.Reader are not complete,
// use the original raw data to prevent panic
if int64(len(js)) <= syntax.Offset {
js = string(d.rawData)
}
start := strings.LastIndex(js[:syntax.Offset], "\n") + 1 start := strings.LastIndex(js[:syntax.Offset], "\n") + 1
line := strings.Count(js[:start], "\n") line := strings.Count(js[:start], "\n")
return fmt.Errorf("json: line %d: %s", line, syntax.Error()) return fmt.Errorf("json: line %d: %s", line, syntax.Error())
@ -296,10 +305,10 @@ func (r *LineReader) Read() ([]byte, error) {
// GuessJSONStream scans the provided reader up to size, looking // GuessJSONStream scans the provided reader up to size, looking
// for an open brace indicating this is JSON. It will return the // for an open brace indicating this is JSON. It will return the
// bufio.Reader it creates for the consumer. // bufio.Reader it creates for the consumer.
func GuessJSONStream(r io.Reader, size int) (io.Reader, bool) { func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
buffer := bufio.NewReaderSize(r, size) buffer := bufio.NewReaderSize(r, size)
b, _ := buffer.Peek(size) b, _ := buffer.Peek(size)
return buffer, hasJSONPrefix(b) return buffer, b, hasJSONPrefix(b)
} }
var jsonPrefix = []byte("{") var jsonPrefix = []byte("{")