Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-28 13:03:43 +00:00)

run update staging

This commit is contained in:
parent ef19bf8413
commit bbcf06167e

Changed file shown in the file list: staging/src/k8s.io/client-go/Godeps/Godeps.json (generated). The full diff follows.
@@ -1,7 +1,7 @@
 {
   "ImportPath": "k8s.io/client-go",
   "GoVersion": "go1.7",
-  "GodepVersion": "v75",
+  "GodepVersion": "v74",
   "Packages": [
     "./..."
   ],
@@ -183,7 +183,7 @@
   },
   {
     "ImportPath": "github.com/spf13/pflag",
-    "Rev": "5ccb023bc27df288a957c5e994cd44fd19619465"
+    "Rev": "c7e63cf4530bcd3ba943729cee0efeff2ebea63f"
   },
   {
     "ImportPath": "github.com/stretchr/testify/assert",
@@ -1,15 +0,0 @@
-# This is the official list of cloud authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS files.
-# See the latter for an explanation.
-
-# Names should be added to this file as:
-# Name or Organization <email address>
-# The email address is not required for organizations.
-
-Filippo Valsorda <hi@filippo.io>
-Google Inc.
-Ingo Oeser <nightlyone@googlemail.com>
-Palm Stone Games, Inc.
-Paweł Knap <pawelknap88@gmail.com>
-Péter Szilágyi <peterke@gmail.com>
-Tyler Treat <ttreat31@gmail.com>
@@ -1,34 +0,0 @@
-# People who have agreed to one of the CLAs and can contribute patches.
-# The AUTHORS file lists the copyright holders; this file
-# lists people. For example, Google employees are listed here
-# but not in AUTHORS, because Google holds the copyright.
-#
-# https://developers.google.com/open-source/cla/individual
-# https://developers.google.com/open-source/cla/corporate
-#
-# Names should be added to this file as:
-# Name <email address>
-
-# Keep the list alphabetically sorted.
-
-Andreas Litt <andreas.litt@gmail.com>
-Andrew Gerrand <adg@golang.org>
-Brad Fitzpatrick <bradfitz@golang.org>
-Burcu Dogan <jbd@google.com>
-Dave Day <djd@golang.org>
-David Sansome <me@davidsansome.com>
-David Symonds <dsymonds@golang.org>
-Filippo Valsorda <hi@filippo.io>
-Glenn Lewis <gmlewis@google.com>
-Ingo Oeser <nightlyone@googlemail.com>
-Johan Euphrosine <proppy@google.com>
-Jonathan Amsterdam <jba@google.com>
-Luna Duclos <luna.duclos@palmstonegames.com>
-Michael McGreevy <mcgreevy@golang.org>
-Omar Jarjur <ojarjur@google.com>
-Paweł Knap <pawelknap88@gmail.com>
-Péter Szilágyi <peterke@gmail.com>
-Sarah Adams <shadams@google.com>
-Toby Burress <kurin@google.com>
-Tuo Shan <shantuo@google.com>
-Tyler Treat <ttreat31@gmail.com>
@@ -1,128 +0,0 @@
-Aaron Lehmann <aaron.lehmann@docker.com>
-Aaron Vinson <avinson.public@gmail.com>
-Adam Enger <adamenger@gmail.com>
-Adrian Mouat <adrian.mouat@gmail.com>
-Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
-Alex Chan <alex.chan@metaswitch.com>
-Alex Elman <aelman@indeed.com>
-amitshukla <ashukla73@hotmail.com>
-Amy Lindburg <amy.lindburg@docker.com>
-Andrew Meredith <andymeredith@gmail.com>
-Andrew T Nguyen <andrew.nguyen@docker.com>
-Andrey Kostov <kostov.andrey@gmail.com>
-Andy Goldstein <agoldste@redhat.com>
-Anton Tiurin <noxiouz@yandex.ru>
-Antonio Mercado <amercado@thinknode.com>
-Antonio Murdaca <runcom@redhat.com>
-Arnaud Porterie <arnaud.porterie@docker.com>
-Arthur Baars <arthur@semmle.com>
-Asuka Suzuki <hello@tanksuzuki.com>
-Avi Miller <avi.miller@oracle.com>
-Ayose Cazorla <ayosec@gmail.com>
-BadZen <dave.trombley@gmail.com>
-Ben Firshman <ben@firshman.co.uk>
-bin liu <liubin0329@gmail.com>
-Brian Bland <brian.bland@docker.com>
-burnettk <burnettk@gmail.com>
-Carson A <ca@carsonoid.net>
-Chris Dillon <squarism@gmail.com>
-Daisuke Fujita <dtanshi45@gmail.com>
-Darren Shepherd <darren@rancher.com>
-Dave Trombley <dave.trombley@gmail.com>
-Dave Tucker <dt@docker.com>
-David Lawrence <david.lawrence@docker.com>
-David Verhasselt <david@crowdway.com>
-David Xia <dxia@spotify.com>
-davidli <wenquan.li@hp.com>
-Dejan Golja <dejan@golja.org>
-Derek McGowan <derek@mcgstyle.net>
-Diogo Mónica <diogo.monica@gmail.com>
-DJ Enriquez <dj.enriquez@infospace.com>
-Donald Huang <don.hcd@gmail.com>
-Doug Davis <dug@us.ibm.com>
-Eric Yang <windfarer@gmail.com>
-farmerworking <farmerworking@gmail.com>
-Felix Yan <felixonmars@archlinux.org>
-Florentin Raud <florentin.raud@gmail.com>
-Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
-gabriell nascimento <gabriell@bluesoft.com.br>
-harche <p.harshal@gmail.com>
-Henri Gomez <henri.gomez@gmail.com>
-Hu Keping <hukeping@huawei.com>
-Hua Wang <wanghua.humble@gmail.com>
-HuKeping <hukeping@huawei.com>
-Ian Babrou <ibobrik@gmail.com>
-igayoso <igayoso@gmail.com>
-Jack Griffin <jackpg14@gmail.com>
-Jason Freidman <jason.freidman@gmail.com>
-Jeff Nickoloff <jeff@allingeek.com>
-Jessie Frazelle <jessie@docker.com>
-Jianqing Wang <tsing@jianqing.org>
-John Starks <jostarks@microsoft.com>
-Jon Poler <jonathan.poler@apcera.com>
-Jonathan Boulle <jonathanboulle@gmail.com>
-Jordan Liggitt <jliggitt@redhat.com>
-Josh Hawn <josh.hawn@docker.com>
-Julien Fernandez <julien.fernandez@gmail.com>
-Keerthan Mala <kmala@engineyard.com>
-Kelsey Hightower <kelsey.hightower@gmail.com>
-Kenneth Lim <kennethlimcp@gmail.com>
-Kenny Leung <kleung@google.com>
-Li Yi <denverdino@gmail.com>
-Liu Hua <sdu.liu@huawei.com>
-liuchang0812 <liuchang0812@gmail.com>
-Louis Kottmann <louis.kottmann@gmail.com>
-Luke Carpenter <x@rubynerd.net>
-Mary Anthony <mary@docker.com>
-Matt Bentley <mbentley@mbentley.net>
-Matt Duch <matt@learnmetrics.com>
-Matt Moore <mattmoor@google.com>
-Matt Robenolt <matt@ydekproductions.com>
-Michael Prokop <mika@grml.org>
-Michal Minar <miminar@redhat.com>
-Miquel Sabaté <msabate@suse.com>
-Morgan Bauer <mbauer@us.ibm.com>
-moxiegirl <mary@docker.com>
-Nathan Sullivan <nathan@nightsys.net>
-nevermosby <robolwq@qq.com>
-Nghia Tran <tcnghia@gmail.com>
-Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
-Oilbeater <liumengxinfly@gmail.com>
-Olivier Gambier <olivier@docker.com>
-Olivier Jacques <olivier.jacques@hp.com>
-Omer Cohen <git@omer.io>
-Patrick Devine <patrick.devine@docker.com>
-Philip Misiowiec <philip@atlashealth.com>
-Richard Scothern <richard.scothern@docker.com>
-Rodolfo Carvalho <rhcarvalho@gmail.com>
-Rusty Conover <rusty@luckydinosaur.com>
-Sean Boran <Boran@users.noreply.github.com>
-Sebastiaan van Stijn <github@gone.nl>
-Sharif Nassar <sharif@mrwacky.com>
-Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
-Shreyas Karnik <karnik.shreyas@gmail.com>
-Simon Thulbourn <simon+github@thulbourn.com>
-Spencer Rinehart <anubis@overthemonkey.com>
-Stefan Weil <sw@weilnetz.de>
-Stephen J Day <stephen.day@docker.com>
-Sungho Moon <sungho.moon@navercorp.com>
-Sven Dowideit <SvenDowideit@home.org.au>
-Sylvain Baubeau <sbaubeau@redhat.com>
-Ted Reed <ted.reed@gmail.com>
-tgic <farmer1992@gmail.com>
-Thomas Sjögren <konstruktoid@users.noreply.github.com>
-Tianon Gravi <admwiggin@gmail.com>
-Tibor Vass <teabee89@gmail.com>
-Tonis Tiigi <tonistiigi@gmail.com>
-Trevor Pounds <trevor.pounds@gmail.com>
-Troels Thomsen <troels@thomsen.io>
-Vincent Batts <vbatts@redhat.com>
-Vincent Demeester <vincent@sbr.pm>
-Vincent Giersch <vincent.giersch@ovh.net>
-W. Trevor King <wking@tremily.us>
-weiyuan.yl <weiyuan.yl@alibaba-inc.com>
-xg.song <xg.song@venusource.com>
-xiekeyang <xiekeyang@huawei.com>
-Yann ROBERT <yann.robert@anantaplex.fr>
-yuzou <zouyu7@huawei.com>
-姜继忠 <jizhong.jiangjz@alibaba-inc.com>
@@ -1,15 +0,0 @@
-Anton Povarov <anton.povarov@gmail.com>
-Clayton Coleman <ccoleman@redhat.com>
-Denis Smirnov <denis.smirnov.91@gmail.com>
-DongYun Kang <ceram1000@gmail.com>
-Dwayne Schultz <dschultz@pivotal.io>
-Georg Apitz <gapitz@pivotal.io>
-Gustav Paul <gustav.paul@gmail.com>
-John Tuley <john@tuley.org>
-Laurent <laurent@adyoulike.com>
-Patrick Lee <patrick@dropbox.com>
-Stephen J Day <stephen.day@docker.com>
-Tamir Duberstein <tamird@gmail.com>
-Todd Eisenberger <teisenberger@dropbox.com>
-Tormod Erevik Lea <tormodlea@gmail.com>
-Walter Schulze <awalterschulze@gmail.com>
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
@@ -416,7 +416,7 @@ func Set(name, value string) error {
 // otherwise, the default values of all defined flags in the set.
 func (f *FlagSet) PrintDefaults() {
   usages := f.FlagUsages()
-  fmt.Fprint(f.out(), usages)
+  fmt.Fprintf(f.out(), "%s", usages)
 }
 
 // defaultIsZeroValue returns true if the default value for this flag represents
@@ -514,7 +514,7 @@ func (f *FlagSet) FlagUsages() string {
     if len(flag.NoOptDefVal) > 0 {
       switch flag.Value.Type() {
       case "string":
-        line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+        line += fmt.Sprintf("[=%q]", flag.NoOptDefVal)
       case "bool":
         if flag.NoOptDefVal != "true" {
           line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
@@ -534,7 +534,7 @@ func (f *FlagSet) FlagUsages() string {
     line += usage
     if !flag.defaultIsZeroValue() {
       if flag.Value.Type() == "string" {
-        line += fmt.Sprintf(" (default \"%s\")", flag.DefValue)
+        line += fmt.Sprintf(" (default %q)", flag.DefValue)
       } else {
        line += fmt.Sprintf(" (default %s)", flag.DefValue)
      }
@@ -2,6 +2,7 @@ package pflag
 
 import (
   "fmt"
+  "strings"
 )
 
 var _ = fmt.Fprint
@@ -39,7 +40,7 @@ func (s *stringArrayValue) String() string {
 }
 
 func stringArrayConv(sval string) (interface{}, error) {
-  sval = sval[1 : len(sval)-1]
+  sval = strings.Trim(sval, "[]")
   // An empty string would cause a array with one (empty) string
   if len(sval) == 0 {
     return []string{}, nil
@@ -66,7 +66,7 @@ func (s *stringSliceValue) String() string {
 }
 
 func stringSliceConv(sval string) (interface{}, error) {
-  sval = sval[1 : len(sval)-1]
+  sval = strings.Trim(sval, "[]")
   // An empty string would cause a slice with one (empty) string
   if len(sval) == 0 {
     return []string{}, nil
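Note on the pflag change above: the old conversion sliced off the first and last byte unconditionally, which panics on an empty value and corrupts values that were never bracket-wrapped, while strings.Trim only strips brackets that are actually present. A standalone sketch of the difference (illustration only, not part of pflag):

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, sval := range []string{"[a,b,c]", "a,b,c", ""} {
		// strings.Trim never panics and leaves unbracketed input untouched;
		// sval[1:len(sval)-1] would panic on "" and mangle "a,b,c".
		fmt.Printf("%q -> %q\n", sval, strings.Trim(sval, "[]"))
	}
}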
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
@@ -43,18 +43,26 @@ const (
   dynamicKubeletConfig      = "DynamicKubeletConfig"
   dynamicVolumeProvisioning = "DynamicVolumeProvisioning"
   streamingProxyRedirects   = "StreamingProxyRedirects"
+
+  // experimentalHostUserNamespaceDefaulting Default userns=host for containers
+  // that are using other host namespaces, host mounts, the pod contains a privileged container,
+  // or specific non-namespaced capabilities
+  // (MKNOD, SYS_MODULE, SYS_TIME). This should only be enabled if user namespace remapping is enabled
+  // in the docker daemon.
+  experimentalHostUserNamespaceDefaultingGate = "ExperimentalHostUserNamespaceDefaulting"
 )
 
 var (
   // Default values for recorded features. Every new feature gate should be
   // represented here.
   knownFeatures = map[string]featureSpec{
     allAlphaGate:              {false, alpha},
     externalTrafficLocalOnly:  {true, beta},
     appArmor:                  {true, beta},
     dynamicKubeletConfig:      {false, alpha},
     dynamicVolumeProvisioning: {true, alpha},
     streamingProxyRedirects:   {false, alpha},
+    experimentalHostUserNamespaceDefaultingGate: {false, alpha},
   }
 
   // Special handling for a few gates.
@@ -115,6 +123,10 @@ type FeatureGate interface {
   // owner: timstclair
   // alpha: v1.5
   StreamingProxyRedirects() bool
+
+  // owner: @pweil-
+  // alpha: v1.5
+  ExperimentalHostUserNamespaceDefaulting() bool
 }
 
 // featureGate implements FeatureGate as well as pflag.Value for flag parsing.
@@ -209,6 +221,11 @@ func (f *featureGate) StreamingProxyRedirects() bool {
   return f.lookup(streamingProxyRedirects)
 }
 
+// ExperimentalHostUserNamespaceDefaulting returns value for experimentalHostUserNamespaceDefaulting
+func (f *featureGate) ExperimentalHostUserNamespaceDefaulting() bool {
+  return f.lookup(experimentalHostUserNamespaceDefaultingGate)
+}
+
 func (f *featureGate) lookup(key string) bool {
   defaultValue := f.known[key].enabled
   if f.enabled != nil {
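For orientation, the new gate is read through the FeatureGate accessor rather than the map itself, and its default comes from knownFeatures unless overridden at startup (normally via the --feature-gates flag). A hedged, self-contained sketch of that lookup pattern; featureSpec, the gate key and the override map here are simplified stand-ins, not the package's real types:

package main

import "fmt"

type featureSpec struct {
	enabled    bool
	prerelease string
}

type featureGate struct {
	known   map[string]featureSpec // defaults, like knownFeatures above
	enabled map[string]bool        // overrides, as parsed from --feature-gates
}

func (f *featureGate) lookup(key string) bool {
	defaultValue := f.known[key].enabled
	if f.enabled != nil {
		if v, ok := f.enabled[key]; ok {
			return v
		}
	}
	return defaultValue
}

func main() {
	f := &featureGate{
		known:   map[string]featureSpec{"ExperimentalHostUserNamespaceDefaulting": {false, "ALPHA"}},
		enabled: map[string]bool{"ExperimentalHostUserNamespaceDefaulting": true}, // as if set to true on the command line
	}
	fmt.Println(f.lookup("ExperimentalHostUserNamespaceDefaulting")) // true
}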
@@ -21,6 +21,7 @@ import (
   "reflect"
   "sort"
 
+  "k8s.io/client-go/discovery"
   forkedjson "k8s.io/client-go/pkg/third_party/forked/golang/json"
   "k8s.io/client-go/pkg/util/json"
 
@@ -38,11 +39,20 @@ import (
 // Some of the content of this package was borrowed with minor adaptations from
 // evanphx/json-patch and openshift/origin.
 
+type StrategicMergePatchVersion string
+
 const (
   directiveMarker = "$patch"
   deleteDirective = "delete"
   replaceDirective = "replace"
   mergeDirective = "merge"
+  mergePrimitivesListDirective = "mergeprimitiveslist"
+
+  // different versions of StrategicMergePatch
+  SMPatchVersion_1_0 StrategicMergePatchVersion = "v1.0.0"
+  SMPatchVersion_1_5 StrategicMergePatchVersion = "v1.5.0"
+  Unknown            StrategicMergePatchVersion = "Unknown"
+  SMPatchVersionLatest = SMPatchVersion_1_5
 )
 
 // IsPreconditionFailed returns true if the provided error indicates
@@ -87,6 +97,7 @@ func IsConflict(err error) bool {
 
 var errBadJSONDoc = fmt.Errorf("Invalid JSON document")
 var errNoListOfLists = fmt.Errorf("Lists of lists are not supported")
+var errNoElementsInSlice = fmt.Errorf("no elements in any of the given slices")
 
 // The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge.
 // Instead of defining a Delta that holds an original, a patch and a set of preconditions,
@@ -133,15 +144,15 @@ func RequireMetadataKeyUnchanged(key string) PreconditionFunc {
 }
 
 // Deprecated: Use the synonym CreateTwoWayMergePatch, instead.
-func CreateStrategicMergePatch(original, modified []byte, dataStruct interface{}) ([]byte, error) {
-  return CreateTwoWayMergePatch(original, modified, dataStruct)
+func CreateStrategicMergePatch(original, modified []byte, dataStruct interface{}, smPatchVersion StrategicMergePatchVersion) ([]byte, error) {
+  return CreateTwoWayMergePatch(original, modified, dataStruct, smPatchVersion)
 }
 
 // CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original
 // document and a modified document, which are passed to the method as json encoded content. It will
 // return a patch that yields the modified document when applied to the original document, or an error
 // if either of the two documents is invalid.
-func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...PreconditionFunc) ([]byte, error) {
+func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, smPatchVersion StrategicMergePatchVersion, fns ...PreconditionFunc) ([]byte, error) {
   originalMap := map[string]interface{}{}
   if len(original) > 0 {
     if err := json.Unmarshal(original, &originalMap); err != nil {
@@ -161,7 +172,7 @@ func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, f
     return nil, err
   }
 
-  patchMap, err := diffMaps(originalMap, modifiedMap, t, false, false)
+  patchMap, err := diffMaps(originalMap, modifiedMap, t, false, false, smPatchVersion)
   if err != nil {
     return nil, err
   }
@@ -177,7 +188,7 @@ func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, f
 }
 
 // Returns a (recursive) strategic merge patch that yields modified when applied to original.
-func diffMaps(original, modified map[string]interface{}, t reflect.Type, ignoreChangesAndAdditions, ignoreDeletions bool) (map[string]interface{}, error) {
+func diffMaps(original, modified map[string]interface{}, t reflect.Type, ignoreChangesAndAdditions, ignoreDeletions bool, smPatchVersion StrategicMergePatchVersion) (map[string]interface{}, error) {
   patch := map[string]interface{}{}
   if t.Kind() == reflect.Ptr {
     t = t.Elem()
@@ -230,7 +241,7 @@ func diffMaps(original, modified map[string]interface{}, t reflect.Type, ignoreC
         return nil, err
       }
 
-      patchValue, err := diffMaps(originalValueTyped, modifiedValueTyped, fieldType, ignoreChangesAndAdditions, ignoreDeletions)
+      patchValue, err := diffMaps(originalValueTyped, modifiedValueTyped, fieldType, ignoreChangesAndAdditions, ignoreDeletions, smPatchVersion)
       if err != nil {
         return nil, err
       }
@@ -248,13 +259,25 @@ func diffMaps(original, modified map[string]interface{}, t reflect.Type, ignoreC
       }
 
       if fieldPatchStrategy == mergeDirective {
-        patchValue, err := diffLists(originalValueTyped, modifiedValueTyped, fieldType.Elem(), fieldPatchMergeKey, ignoreChangesAndAdditions, ignoreDeletions)
+        patchValue, err := diffLists(originalValueTyped, modifiedValueTyped, fieldType.Elem(), fieldPatchMergeKey, ignoreChangesAndAdditions, ignoreDeletions, smPatchVersion)
         if err != nil {
           return nil, err
         }
+        if patchValue == nil {
+          continue
+        }
+
-        if len(patchValue) > 0 {
-          patch[key] = patchValue
+        switch typedPatchValue := patchValue.(type) {
+        case []interface{}:
+          if len(typedPatchValue) > 0 {
+            patch[key] = typedPatchValue
+          }
+        case map[string]interface{}:
+          if len(typedPatchValue) > 0 {
+            patch[key] = typedPatchValue
+          }
+        default:
+          return nil, fmt.Errorf("invalid type of patch: %v", reflect.TypeOf(patchValue))
         }
 
         continue
@@ -284,7 +307,7 @@ func diffMaps(original, modified map[string]interface{}, t reflect.Type, ignoreC
 
 // Returns a (recursive) strategic merge patch that yields modified when applied to original,
 // for a pair of lists with merge semantics.
-func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, error) {
+func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool, smPatchVersion StrategicMergePatchVersion) (interface{}, error) {
   if len(original) == 0 {
     if len(modified) == 0 || ignoreChangesAndAdditions {
       return nil, nil
@@ -298,12 +321,14 @@ func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string
     return nil, err
   }
 
-  var patch []interface{}
+  var patch interface{}
 
   if elementType.Kind() == reflect.Map {
-    patch, err = diffListsOfMaps(original, modified, t, mergeKey, ignoreChangesAndAdditions, ignoreDeletions)
-  } else if !ignoreChangesAndAdditions {
-    patch, err = diffListsOfScalars(original, modified)
+    patch, err = diffListsOfMaps(original, modified, t, mergeKey, ignoreChangesAndAdditions, ignoreDeletions, smPatchVersion)
+  } else if elementType.Kind() == reflect.Slice {
+    err = errNoListOfLists
+  } else {
+    patch, err = diffListsOfScalars(original, modified, ignoreChangesAndAdditions, ignoreDeletions, smPatchVersion)
   }
 
   if err != nil {
@@ -315,8 +340,23 @@ func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string
 
 // Returns a (recursive) strategic merge patch that yields modified when applied to original,
 // for a pair of lists of scalars with merge semantics.
-func diffListsOfScalars(original, modified []interface{}) ([]interface{}, error) {
-  if len(modified) == 0 {
+func diffListsOfScalars(original, modified []interface{}, ignoreChangesAndAdditions, ignoreDeletions bool, smPatchVersion StrategicMergePatchVersion) (interface{}, error) {
+  originalScalars := uniqifyAndSortScalars(original)
+  modifiedScalars := uniqifyAndSortScalars(modified)
+
+  switch smPatchVersion {
+  case SMPatchVersion_1_5:
+    return diffListsOfScalarsIntoMap(originalScalars, modifiedScalars, ignoreChangesAndAdditions, ignoreDeletions)
+  case SMPatchVersion_1_0:
+    return diffListsOfScalarsIntoSlice(originalScalars, modifiedScalars, ignoreChangesAndAdditions, ignoreDeletions)
+  default:
+    return nil, fmt.Errorf("Unknown StrategicMergePatchVersion: %v", smPatchVersion)
+  }
+}
+
+func diffListsOfScalarsIntoSlice(originalScalars, modifiedScalars []interface{}, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, error) {
+  originalIndex, modifiedIndex := 0, 0
+  if len(modifiedScalars) == 0 {
     // There is no need to check the length of original because there is no way to create
     // a patch that deletes a scalar from a list of scalars with merge semantics.
     return nil, nil
@@ -324,18 +364,14 @@ func diffListsOfScalars(original, modified []interface{}) ([]interface{}, error)
 
   patch := []interface{}{}
 
-  originalScalars := uniqifyAndSortScalars(original)
-  modifiedScalars := uniqifyAndSortScalars(modified)
-  originalIndex, modifiedIndex := 0, 0
-
 loopB:
   for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ {
     for ; originalIndex < len(originalScalars); originalIndex++ {
-      originalString := fmt.Sprintf("%v", original[originalIndex])
-      modifiedString := fmt.Sprintf("%v", modified[modifiedIndex])
+      originalString := fmt.Sprintf("%v", originalScalars[originalIndex])
+      modifiedString := fmt.Sprintf("%v", modifiedScalars[modifiedIndex])
       if originalString >= modifiedString {
         if originalString != modifiedString {
-          patch = append(patch, modified[modifiedIndex])
+          patch = append(patch, modifiedScalars[modifiedIndex])
         }
 
         continue loopB
@@ -349,7 +385,57 @@ loopB:
 
   // Add any remaining items found only in modified
   for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ {
-    patch = append(patch, modified[modifiedIndex])
+    patch = append(patch, modifiedScalars[modifiedIndex])
+  }
+
+  return patch, nil
+}
+
+func diffListsOfScalarsIntoMap(originalScalars, modifiedScalars []interface{}, ignoreChangesAndAdditions, ignoreDeletions bool) (map[string]interface{}, error) {
+  originalIndex, modifiedIndex := 0, 0
+  patch := map[string]interface{}{}
+  patch[directiveMarker] = mergePrimitivesListDirective
+
+  for originalIndex < len(originalScalars) && modifiedIndex < len(modifiedScalars) {
+    originalString := fmt.Sprintf("%v", originalScalars[originalIndex])
+    modifiedString := fmt.Sprintf("%v", modifiedScalars[modifiedIndex])
+
+    // objects are identical
+    if originalString == modifiedString {
+      originalIndex++
+      modifiedIndex++
+      continue
+    }
+
+    if originalString > modifiedString {
+      if !ignoreChangesAndAdditions {
+        modifiedValue := modifiedScalars[modifiedIndex]
+        patch[modifiedString] = modifiedValue
+      }
+      modifiedIndex++
+    } else {
+      if !ignoreDeletions {
+        patch[originalString] = nil
+      }
+      originalIndex++
+    }
+  }
+
+  // Delete any remaining items found only in original
+  if !ignoreDeletions {
+    for ; originalIndex < len(originalScalars); originalIndex++ {
+      originalString := fmt.Sprintf("%v", originalScalars[originalIndex])
+      patch[originalString] = nil
+    }
+  }
+
+  // Add any remaining items found only in modified
+  if !ignoreChangesAndAdditions {
+    for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ {
+      modifiedString := fmt.Sprintf("%v", modifiedScalars[modifiedIndex])
+      modifiedValue := modifiedScalars[modifiedIndex]
+      patch[modifiedString] = modifiedValue
+    }
   }
 
   return patch, nil
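The two code paths above emit different patch shapes for the same scalar-list change. A small illustrative sketch (values chosen for the example, not taken from the commit): for original ["a","b"] and modified ["a","c"], the v1.0.0 path produces a plain slice of additions, while the v1.5.0 path produces a map tagged with the mergeprimitiveslist directive, in which a nil value marks a deletion:

package main

import "fmt"

func main() {
	// SMPatchVersion_1_0: additions only; deletions of scalars cannot be expressed.
	patchV10 := []interface{}{"c"}

	// SMPatchVersion_1_5: keyed by the scalar's string form; nil deletes "b", "c" adds "c".
	patchV15 := map[string]interface{}{
		"$patch": "mergeprimitiveslist",
		"b":      nil,
		"c":      "c",
	}
	fmt.Println(patchV10, patchV15)
}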
@@ -360,7 +446,7 @@ var errBadArgTypeFmt = "expected a %s, but received a %s"
 
 // Returns a (recursive) strategic merge patch that yields modified when applied to original,
 // for a pair of lists of maps with merge semantics.
-func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, error) {
+func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool, smPatchVersion StrategicMergePatchVersion) ([]interface{}, error) {
   patch := make([]interface{}, 0)
 
   originalSorted, err := sortMergeListsByNameArray(original, t, mergeKey, false)
@@ -406,7 +492,7 @@ loopB:
     if originalString >= modifiedString {
       if originalString == modifiedString {
         // Merge key values are equal, so recurse
-        patchValue, err := diffMaps(originalMap, modifiedMap, t, ignoreChangesAndAdditions, ignoreDeletions)
+        patchValue, err := diffMaps(originalMap, modifiedMap, t, ignoreChangesAndAdditions, ignoreDeletions, smPatchVersion)
         if err != nil {
           return nil, err
         }
@@ -542,7 +628,15 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type) (map[strin
       return map[string]interface{}{}, nil
     }
 
-    return nil, fmt.Errorf(errBadPatchTypeFmt, v, patch)
+    if v == mergePrimitivesListDirective {
+      // delete the directiveMarker's key-value pair to avoid delta map and delete map
+      // overlaping with each other when calculating a ThreeWayDiff for list of Primitives.
+      // Otherwise, the overlaping will cause it calling LookupPatchMetadata() which will
+      // return an error since the metadata shows it's a slice but it is actually a map.
+      delete(original, directiveMarker)
+    } else {
+      return nil, fmt.Errorf(errBadPatchTypeFmt, v, patch)
+    }
   }
 
   // nil is an accepted value for original to simplify logic in other places.
@@ -578,7 +672,9 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type) (map[strin
     // If they're both maps or lists, recurse into the value.
     originalType := reflect.TypeOf(original[k])
     patchType := reflect.TypeOf(patchV)
-    if originalType == patchType {
+    // check if we are trying to merge a slice with a map for list of primitives
+    isMergeSliceOfPrimitivesWithAPatchMap := originalType != nil && patchType != nil && originalType.Kind() == reflect.Slice && patchType.Kind() == reflect.Map
+    if originalType == patchType || isMergeSliceOfPrimitivesWithAPatchMap {
       // First find the fieldPatchStrategy and fieldPatchMergeKey.
       fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k)
       if err != nil {
@@ -600,9 +696,8 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type) (map[strin
       if originalType.Kind() == reflect.Slice && fieldPatchStrategy == mergeDirective {
         elemType := fieldType.Elem()
         typedOriginal := original[k].([]interface{})
-        typedPatch := patchV.([]interface{})
         var err error
-        original[k], err = mergeSlice(typedOriginal, typedPatch, elemType, fieldPatchMergeKey)
+        original[k], err = mergeSlice(typedOriginal, patchV, elemType, fieldPatchMergeKey)
         if err != nil {
           return nil, err
         }
@@ -623,13 +718,34 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
 // Merge two slices together. Note: This may modify both the original slice and
 // the patch because getting a deep copy of a slice in golang is highly
 // non-trivial.
-func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey string) ([]interface{}, error) {
-  if len(original) == 0 && len(patch) == 0 {
+// The patch could be a map[string]interface{} representing a slice of primitives.
+// If the patch map doesn't has the specific directiveMarker (mergePrimitivesListDirective),
+// it returns an error. Please check patch_test.go and find the test case named
+// "merge lists of scalars for list of primitives" to see what the patch looks like.
+// Patch is still []interface{} for all the other types.
+func mergeSlice(original []interface{}, patch interface{}, elemType reflect.Type, mergeKey string) ([]interface{}, error) {
+  t, err := sliceElementType(original)
+  if err != nil && err != errNoElementsInSlice {
+    return nil, err
+  }
+
+  if patchMap, ok := patch.(map[string]interface{}); ok {
+    // We try to merge the original slice with a patch map only when the map has
+    // a specific directiveMarker. Otherwise, this patch will be treated as invalid.
+    if directiveValue, ok := patchMap[directiveMarker]; ok && directiveValue == mergePrimitivesListDirective {
+      return mergeSliceOfScalarsWithPatchMap(original, patchMap)
+    } else {
+      return nil, fmt.Errorf("Unable to merge a slice with an invalid map")
+    }
+  }
+
+  typedPatch := patch.([]interface{})
+  if len(original) == 0 && len(typedPatch) == 0 {
     return original, nil
   }
 
   // All the values must be of the same type, but not a list.
-  t, err := sliceElementType(original, patch)
+  t, err = sliceElementType(original, typedPatch)
   if err != nil {
     return nil, err
   }
@@ -638,7 +754,7 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
   if t.Kind() != reflect.Map {
     // Maybe in the future add a "concat" mode that doesn't
     // uniqify.
-    both := append(original, patch...)
+    both := append(original, typedPatch...)
     return uniqifyScalars(both), nil
   }
 
@@ -649,7 +765,7 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
   // First look for any special $patch elements.
   patchWithoutSpecialElements := []interface{}{}
   replace := false
-  for _, v := range patch {
+  for _, v := range typedPatch {
     typedV := v.(map[string]interface{})
     patchType, ok := typedV[directiveMarker]
     if ok {
@@ -685,10 +801,10 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
     return patchWithoutSpecialElements, nil
   }
 
-  patch = patchWithoutSpecialElements
+  typedPatch = patchWithoutSpecialElements
 
   // Merge patch into original.
-  for _, v := range patch {
+  for _, v := range typedPatch {
     // Because earlier we confirmed that all the elements are maps.
     typedV := v.(map[string]interface{})
     mergeValue, ok := typedV[mergeKey]
@@ -721,6 +837,36 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
   return original, nil
 }
 
+// mergeSliceOfScalarsWithPatchMap merges the original slice with a patch map and
+// returns an uniqified and sorted slice of primitives.
+// The patch map must have the specific directiveMarker (mergePrimitivesListDirective).
+func mergeSliceOfScalarsWithPatchMap(original []interface{}, patch map[string]interface{}) ([]interface{}, error) {
+  // make sure the patch has the specific directiveMarker ()
+  if directiveValue, ok := patch[directiveMarker]; ok && directiveValue != mergePrimitivesListDirective {
+    return nil, fmt.Errorf("Unable to merge a slice with an invalid map")
+  }
+  delete(patch, directiveMarker)
+  output := make([]interface{}, 0, len(original)+len(patch))
+  for _, value := range original {
+    valueString := fmt.Sprintf("%v", value)
+    if v, ok := patch[valueString]; ok {
+      if v != nil {
+        output = append(output, v)
+      }
+      delete(patch, valueString)
+    } else {
+      output = append(output, value)
+    }
+  }
+  for _, value := range patch {
+    if value != nil {
+      output = append(output, value)
+    }
+    // No action required to delete items that missing from the original slice.
+  }
+  return uniqifyAndSortScalars(output), nil
+}
+
 // This method no longer panics if any element of the slice is not a map.
 func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) {
   for k, v := range m {
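Applying such a map-form patch back onto a slice is what mergeSliceOfScalarsWithPatchMap above does: non-nil entries are kept or added, nil entries delete, and the result is uniquified and sorted. A standalone re-statement of that behaviour for illustration (string-typed for brevity; not the package code):

package main

import (
	"fmt"
	"sort"
)

func applyScalarMapPatch(original []string, patch map[string]interface{}) []string {
	delete(patch, "$patch") // the directive key only identifies the format
	out := []string{}
	for _, v := range original {
		if pv, ok := patch[v]; ok {
			if pv != nil {
				out = append(out, fmt.Sprintf("%v", pv))
			}
			delete(patch, v)
		} else {
			out = append(out, v)
		}
	}
	for _, pv := range patch {
		if pv != nil {
			out = append(out, fmt.Sprintf("%v", pv))
		}
	}
	sort.Strings(out)
	return out
}

func main() {
	patch := map[string]interface{}{"$patch": "mergeprimitiveslist", "b": nil, "c": "c"}
	fmt.Println(applyScalarMapPatch([]string{"a", "b"}, patch)) // [a c]
}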
@@ -946,7 +1092,7 @@ func sliceElementType(slices ...[]interface{}) (reflect.Type, error) {
   }
 
   if prevType == nil {
-    return nil, fmt.Errorf("no elements in any of the given slices")
+    return nil, errNoElementsInSlice
   }
 
   return prevType, nil
@@ -1035,6 +1181,10 @@ func mergingMapFieldsHaveConflicts(
       if leftMarker != rightMarker {
         return true, nil
       }
+
+      if leftMarker == mergePrimitivesListDirective && rightMarker == mergePrimitivesListDirective {
+        return false, nil
+      }
     }
 
     // Check the individual keys.
@@ -1057,12 +1207,29 @@ func mergingMapFieldsHaveConflicts(
 }
 
 func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) {
+  isForListOfPrimitives := false
+  if leftDirective, ok := typedLeft[directiveMarker]; ok {
+    if rightDirective, ok := typedRight[directiveMarker]; ok {
+      if leftDirective == mergePrimitivesListDirective && rightDirective == rightDirective {
+        isForListOfPrimitives = true
+      }
+    }
+  }
   for key, leftValue := range typedLeft {
     if key != directiveMarker {
       if rightValue, ok := typedRight[key]; ok {
-        fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(structType, key)
-        if err != nil {
-          return true, err
+        var fieldType reflect.Type
+        var fieldPatchStrategy, fieldPatchMergeKey string
+        var err error
+        if isForListOfPrimitives {
+          fieldType = reflect.TypeOf(leftValue)
+          fieldPatchStrategy = ""
+          fieldPatchMergeKey = ""
+        } else {
+          fieldType, fieldPatchStrategy, fieldPatchMergeKey, err = forkedjson.LookupPatchMetadata(structType, key)
+          if err != nil {
+            return true, err
+          }
         }
 
         if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue,
@@ -1172,7 +1339,7 @@ func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, struc
 // than from original to current. In other words, a conflict occurs if modified changes any key
 // in a way that is different from how it is changed in current (e.g., deleting it, changing its
 // value).
-func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}, overwrite bool, fns ...PreconditionFunc) ([]byte, error) {
+func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}, overwrite bool, smPatchVersion StrategicMergePatchVersion, fns ...PreconditionFunc) ([]byte, error) {
   originalMap := map[string]interface{}{}
   if len(original) > 0 {
     if err := json.Unmarshal(original, &originalMap); err != nil {
@@ -1203,12 +1370,12 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int
   // from original to modified. To find it, we compute deletions, which are the deletions from
   // original to modified, and delta, which is the difference from current to modified without
   // deletions, and then apply delta to deletions as a patch, which should be strictly additive.
-  deltaMap, err := diffMaps(currentMap, modifiedMap, t, false, true)
+  deltaMap, err := diffMaps(currentMap, modifiedMap, t, false, true, smPatchVersion)
   if err != nil {
     return nil, err
   }
 
-  deletionsMap, err := diffMaps(originalMap, modifiedMap, t, true, false)
+  deletionsMap, err := diffMaps(originalMap, modifiedMap, t, true, false, smPatchVersion)
   if err != nil {
     return nil, err
   }
@@ -1228,7 +1395,7 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int
   // If overwrite is false, and the patch contains any keys that were changed differently,
   // then return a conflict error.
   if !overwrite {
-    changedMap, err := diffMaps(originalMap, currentMap, t, false, false)
+    changedMap, err := diffMaps(originalMap, currentMap, t, false, false, smPatchVersion)
     if err != nil {
       return nil, err
     }
@@ -1263,3 +1430,20 @@ func toYAML(v interface{}) (string, error) {
 
   return string(y), nil
 }
+
+// GetServerSupportedSMPatchVersion takes a discoveryClient,
+// returns the max StrategicMergePatch version supported
+func GetServerSupportedSMPatchVersion(discoveryClient discovery.DiscoveryInterface) (StrategicMergePatchVersion, error) {
+  serverVersion, err := discoveryClient.ServerVersion()
+  if err != nil {
+    return Unknown, err
+  }
+  serverGitVersion := serverVersion.GitVersion
+  if serverGitVersion >= string(SMPatchVersion_1_5) {
+    return SMPatchVersion_1_5, nil
+  }
+  if serverGitVersion >= string(SMPatchVersion_1_0) {
+    return SMPatchVersion_1_0, nil
+  }
+  return Unknown, fmt.Errorf("The version is too old: %v\n", serverVersion)
+}
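Every caller of the patch helpers now passes a StrategicMergePatchVersion, and GetServerSupportedSMPatchVersion above is the intended way to pick it. A hedged usage sketch — the strategicpatch and api/v1 import paths plus the v1.Pod example type follow the client-go layout of this era and are assumptions, not taken from this commit:

package example

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/pkg/util/strategicpatch"
)

// twoWayPatchFor builds a strategic-merge patch in the newest format the server
// supports, falling back to the v1.0.0 slice format if discovery fails.
func twoWayPatchFor(dc discovery.DiscoveryInterface, originalJSON, modifiedJSON []byte) ([]byte, error) {
	smPatchVersion, err := strategicpatch.GetServerSupportedSMPatchVersion(dc)
	if err != nil {
		smPatchVersion = strategicpatch.SMPatchVersion_1_0
	}
	return strategicpatch.CreateTwoWayMergePatch(originalJSON, modifiedJSON, v1.Pod{}, smPatchVersion)
}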
@@ -23,6 +23,7 @@ import (
   "net/http"
   "os/exec"
   "strings"
+  "sync"
   "time"
 
   "github.com/golang/glog"
@@ -74,6 +75,7 @@ func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper
 func (g *gcpAuthProvider) Login() error { return nil }
 
 type cachedTokenSource struct {
+  lk          sync.Mutex
   source      oauth2.TokenSource
   accessToken string
   expiry      time.Time
@@ -99,11 +101,7 @@ func newCachedTokenSource(accessToken, expiry string, persister rest.AuthProvide
 }
 
 func (t *cachedTokenSource) Token() (*oauth2.Token, error) {
-  tok := &oauth2.Token{
-    AccessToken: t.accessToken,
-    TokenType:   "Bearer",
-    Expiry:      t.expiry,
-  }
+  tok := t.cachedToken()
   if tok.Valid() && !tok.Expiry.IsZero() {
     return tok, nil
   }
@@ -111,16 +109,39 @@ func (t *cachedTokenSource) Token() (*oauth2.Token, error) {
   if err != nil {
     return nil, err
   }
+  cache := t.update(tok)
   if t.persister != nil {
-    t.cache["access-token"] = tok.AccessToken
-    t.cache["expiry"] = tok.Expiry.Format(time.RFC3339Nano)
-    if err := t.persister.Persist(t.cache); err != nil {
+    if err := t.persister.Persist(cache); err != nil {
       glog.V(4).Infof("Failed to persist token: %v", err)
     }
   }
   return tok, nil
 }
 
+func (t *cachedTokenSource) cachedToken() *oauth2.Token {
+  t.lk.Lock()
+  defer t.lk.Unlock()
+  return &oauth2.Token{
+    AccessToken: t.accessToken,
+    TokenType:   "Bearer",
+    Expiry:      t.expiry,
+  }
+}
+
+func (t *cachedTokenSource) update(tok *oauth2.Token) map[string]string {
+  t.lk.Lock()
+  defer t.lk.Unlock()
+  t.accessToken = tok.AccessToken
+  t.expiry = tok.Expiry
+  ret := map[string]string{}
+  for k, v := range t.cache {
+    ret[k] = v
+  }
+  ret["access-token"] = t.accessToken
+  ret["expiry"] = t.expiry.Format(time.RFC3339Nano)
+  return ret
+}
+
 type commandTokenSource struct {
   cmd  string
   args []string
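The reason for the new lk sync.Mutex and the cachedToken/update split above: the mutable fields are only touched while the lock is held, and update returns a copied map so the slow Persist call can run outside the lock. A generic, self-contained sketch of that copy-under-lock pattern (names are illustrative, not the commit's code):

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu    sync.Mutex
	value string
	cache map[string]string
}

// snapshot mutates state and copies it under the lock, so callers can persist
// the copy without holding the mutex.
func (s *store) snapshot(newValue string) map[string]string {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.value = newValue
	out := map[string]string{}
	for k, v := range s.cache {
		out[k] = v
	}
	out["value"] = s.value
	return out
}

func main() {
	s := &store{cache: map[string]string{"foo": "bar"}}
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			_ = s.snapshot(fmt.Sprintf("v%d", i)) // safe to call concurrently
		}(i)
	}
	wg.Wait()
	fmt.Println("done")
}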
@@ -20,6 +20,7 @@ import (
   "fmt"
   "reflect"
   "strings"
+  "sync"
   "testing"
   "time"
 
@@ -141,3 +142,70 @@ func TestCmdTokenSource(t *testing.T) {
     }
   }
 }
+
+type fakePersister struct {
+  lk    sync.Mutex
+  cache map[string]string
+}
+
+func (f *fakePersister) Persist(cache map[string]string) error {
+  f.lk.Lock()
+  defer f.lk.Unlock()
+  f.cache = map[string]string{}
+  for k, v := range cache {
+    f.cache[k] = v
+  }
+  return nil
+}
+
+func (f *fakePersister) read() map[string]string {
+  ret := map[string]string{}
+  f.lk.Lock()
+  for k, v := range f.cache {
+    ret[k] = v
+  }
+  return ret
+}
+
+type fakeTokenSource struct {
+  token *oauth2.Token
+  err   error
+}
+
+func (f *fakeTokenSource) Token() (*oauth2.Token, error) {
+  return f.token, f.err
+}
+
+func TestCachedTokenSource(t *testing.T) {
+  tok := &oauth2.Token{AccessToken: "fakeaccesstoken"}
+  persister := &fakePersister{}
+  source := &fakeTokenSource{
+    token: tok,
+    err:   nil,
+  }
+  cache := map[string]string{
+    "foo": "bar",
+    "baz": "bazinga",
+  }
+  ts, err := newCachedTokenSource("fakeaccesstoken", "", persister, source, cache)
+  if err != nil {
+    t.Fatal(err)
+  }
+  var wg sync.WaitGroup
+  wg.Add(10)
+  for i := 0; i < 10; i++ {
+    go func() {
+      _, err := ts.Token()
+      if err != nil {
+        t.Errorf("unexpected error: %s", err)
+      }
+      wg.Done()
+    }()
+  }
+  wg.Wait()
+  cache["access-token"] = "fakeaccesstoken"
+  cache["expiry"] = tok.Expiry.Format(time.RFC3339Nano)
+  if got := persister.read(); !reflect.DeepEqual(got, cache) {
+    t.Errorf("got cache %v, want %v", got, cache)
+  }
+}
@@ -70,8 +70,8 @@ type Config struct {
 	// TODO: demonstrate an OAuth2 compatible client.
 	BearerToken string
 
-	// Impersonate is the username that this RESTClient will impersonate
-	Impersonate string
+	// Impersonate is the configuration that RESTClient will use for impersonation.
+	Impersonate ImpersonationConfig
 
 	// Server requires plugin-specified authentication.
 	AuthProvider *clientcmdapi.AuthProviderConfig
@@ -118,6 +118,17 @@ type Config struct {
 	// Version string
 }
 
+// ImpersonationConfig has all the available impersonation options
+type ImpersonationConfig struct {
+	// UserName is the username to impersonate on each request.
+	UserName string
+	// Groups are the groups to impersonate on each request.
+	Groups []string
+	// Extra is a free-form field which can be used to link some authentication information
+	// to authorization information. This field allows you to impersonate it.
+	Extra map[string][]string
+}
+
 // TLSClientConfig contains settings to enable transport layer security
 type TLSClientConfig struct {
 	// Server requires TLS client certificate authentication
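With this change a rest.Config can impersonate groups and extra key/value pairs, not just a user name. A hedged usage sketch follows, assuming the staging import path k8s.io/client-go/rest at this point in the tree; the host and token values are placeholders.

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

func main() {
	// Placeholder host and credentials; in real use these come from clientcmd or in-cluster config.
	cfg := &rest.Config{
		Host:        "https://example.invalid",
		BearerToken: "admin-token",
		// Impersonate is now a struct rather than a bare username string.
		Impersonate: rest.ImpersonationConfig{
			UserName: "jane",
			Groups:   []string{"developers"},
			Extra:    map[string][]string{"scopes": {"view"}},
		},
	}
	fmt.Println(cfg.Impersonate.UserName, cfg.Impersonate.Groups)
}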
@@ -205,7 +205,7 @@ func TestAnonymousConfig(t *testing.T) {
 
 	// this is the list of known security related fields, add to this list if a new field
 	// is added to Config, update AnonymousClientConfig to preserve the field otherwise.
-	expected.Impersonate = ""
+	expected.Impersonate = ImpersonationConfig{}
 	expected.BearerToken = ""
 	expected.Username = ""
 	expected.Password = ""
@@ -89,6 +89,10 @@ func (c *Config) TransportConfig() (*transport.Config, error) {
 		Username:    c.Username,
 		Password:    c.Password,
 		BearerToken: c.BearerToken,
-		Impersonate: c.Impersonate,
+		Impersonate: transport.ImpersonationConfig{
+			UserName: c.Impersonate.UserName,
+			Groups:   c.Impersonate.Groups,
+			Extra:    c.Impersonate.Extra,
+		},
 	}, nil
 }
@@ -144,7 +144,7 @@ func (config *DirectClientConfig) ClientConfig() (*rest.Config, error) {
 		clientConfig.Host = u.String()
 	}
 	if len(configAuthInfo.Impersonate) > 0 {
-		clientConfig.Impersonate = configAuthInfo.Impersonate
+		clientConfig.Impersonate = rest.ImpersonationConfig{UserName: configAuthInfo.Impersonate}
 	}
 
 	// only try to read the auth information if we are secure
@ -215,7 +215,7 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fa
|
|||||||
mergedConfig.BearerToken = string(tokenBytes)
|
mergedConfig.BearerToken = string(tokenBytes)
|
||||||
}
|
}
|
||||||
if len(configAuthInfo.Impersonate) > 0 {
|
if len(configAuthInfo.Impersonate) > 0 {
|
||||||
mergedConfig.Impersonate = configAuthInfo.Impersonate
|
mergedConfig.Impersonate = rest.ImpersonationConfig{UserName: configAuthInfo.Impersonate}
|
||||||
}
|
}
|
||||||
if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
|
if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
|
||||||
mergedConfig.CertFile = configAuthInfo.ClientCertificate
|
mergedConfig.CertFile = configAuthInfo.ClientCertificate
|
||||||
|
@@ -244,7 +244,9 @@ func (e *eventLogger) eventObserve(newEvent *v1.Event) (*v1.Event, []byte, error
 
 		newData, _ := json.Marshal(event)
 		oldData, _ := json.Marshal(eventCopy2)
-		patch, err = strategicpatch.CreateStrategicMergePatch(oldData, newData, event)
+		// TODO: need to figure out if we need to let eventObserve() use the new behavior of StrategicMergePatch.
+		// Currently default to old behavior now. Ref: issue #35936
+		patch, err = strategicpatch.CreateStrategicMergePatch(oldData, newData, event, strategicpatch.SMPatchVersion_1_0)
 	}
 
 	// record our new observation
@@ -34,8 +34,8 @@ type Config struct {
 	// Bearer token for authentication
 	BearerToken string
 
-	// Impersonate is the username that this Config will impersonate
-	Impersonate string
+	// Impersonate is the config that this Config will impersonate using
+	Impersonate ImpersonationConfig
 
 	// Transport may be used for custom HTTP behavior. This attribute may
 	// not be specified with the TLS client certificate options. Use
|
|||||||
WrapTransport func(rt http.RoundTripper) http.RoundTripper
|
WrapTransport func(rt http.RoundTripper) http.RoundTripper
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ImpersonationConfig has all the available impersonation options
|
||||||
|
type ImpersonationConfig struct {
|
||||||
|
// UserName matches user.Info.GetName()
|
||||||
|
UserName string
|
||||||
|
// Groups matches user.Info.GetGroups()
|
||||||
|
Groups []string
|
||||||
|
// Extra matches user.Info.GetExtra()
|
||||||
|
Extra map[string][]string
|
||||||
|
}
|
||||||
|
|
||||||
// HasCA returns whether the configuration has a certificate authority or not.
|
// HasCA returns whether the configuration has a certificate authority or not.
|
||||||
func (c *Config) HasCA() bool {
|
func (c *Config) HasCA() bool {
|
||||||
return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0
|
return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0
|
||||||
|
@@ -48,7 +48,9 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip
 	if len(config.UserAgent) > 0 {
 		rt = NewUserAgentRoundTripper(config.UserAgent, rt)
 	}
-	if len(config.Impersonate) > 0 {
+	if len(config.Impersonate.UserName) > 0 ||
+		len(config.Impersonate.Groups) > 0 ||
+		len(config.Impersonate.Extra) > 0 {
 		rt = NewImpersonatingRoundTripper(config.Impersonate, rt)
 	}
 	return rt, nil
@@ -133,22 +135,53 @@ func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) {
 
 func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
 
+// These correspond to the headers used in pkg/apis/authentication. We don't want the package dependency,
+// but you must not change the values.
+const (
+	// ImpersonateUserHeader is used to impersonate a particular user during an API server request
+	ImpersonateUserHeader = "Impersonate-User"
+
+	// ImpersonateGroupHeader is used to impersonate a particular group during an API server request.
+	// It can be repeated multiplied times for multiple groups.
+	ImpersonateGroupHeader = "Impersonate-Group"
+
+	// ImpersonateUserExtraHeaderPrefix is a prefix for a header used to impersonate an entry in the
+	// extra map[string][]string for user.Info. The key for the `extra` map is suffix.
+	// The same key can be repeated multiple times to have multiple elements in the slice under a single key.
+	// For instance:
+	// Impersonate-Extra-Foo: one
+	// Impersonate-Extra-Foo: two
+	// results in extra["Foo"] = []string{"one", "two"}
+	ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-"
+)
+
 type impersonatingRoundTripper struct {
-	impersonate string
+	impersonate ImpersonationConfig
 	delegate    http.RoundTripper
 }
 
 // NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set.
-func NewImpersonatingRoundTripper(impersonate string, delegate http.RoundTripper) http.RoundTripper {
+func NewImpersonatingRoundTripper(impersonate ImpersonationConfig, delegate http.RoundTripper) http.RoundTripper {
 	return &impersonatingRoundTripper{impersonate, delegate}
 }
 
 func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
-	if len(req.Header.Get("Impersonate-User")) != 0 {
+	// use the user header as marker for the rest.
+	if len(req.Header.Get(ImpersonateUserHeader)) != 0 {
 		return rt.delegate.RoundTrip(req)
 	}
 	req = cloneRequest(req)
-	req.Header.Set("Impersonate-User", rt.impersonate)
+	req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName)
+
+	for _, group := range rt.impersonate.Groups {
+		req.Header.Add(ImpersonateGroupHeader, group)
+	}
+	for k, vv := range rt.impersonate.Extra {
+		for _, v := range vv {
+			req.Header.Add(ImpersonateUserExtraHeaderPrefix+k, v)
+		}
+	}
+
 	return rt.delegate.RoundTrip(req)
 }
 
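Taken together, the round tripper now emits one Impersonate-User header, one Impersonate-Group header per group, and one Impersonate-Extra-<key> header per extra value. A small hedged sketch that wires the new round tripper over a stub transport and prints the headers it sets; the headerDump type is illustrative only and not part of the package.

package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/transport"
)

// headerDump is a stub RoundTripper that prints whatever headers it receives.
type headerDump struct{}

func (headerDump) RoundTrip(req *http.Request) (*http.Response, error) {
	for k, v := range req.Header {
		fmt.Println(k, v)
	}
	return &http.Response{StatusCode: http.StatusOK, Request: req}, nil
}

func main() {
	rt := transport.NewImpersonatingRoundTripper(transport.ImpersonationConfig{
		UserName: "jane",
		Groups:   []string{"developers", "admins"},
		Extra:    map[string][]string{"scopes": {"view", "edit"}},
	}, headerDump{})

	req, _ := http.NewRequest("GET", "https://example.invalid", nil)
	// Expected output (map iteration order may vary):
	//   Impersonate-User [jane]
	//   Impersonate-Group [developers admins]
	//   Impersonate-Extra-Scopes [view edit]
	rt.RoundTrip(req)
}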
@@ -18,6 +18,7 @@ package transport
 
 import (
 	"net/http"
+	"reflect"
 	"testing"
 )
 
@@ -99,3 +100,58 @@ func TestUserAgentRoundTripper(t *testing.T) {
 		t.Errorf("unexpected user agent header: %#v", rt.Request)
 	}
 }
+
+func TestImpersonationRoundTripper(t *testing.T) {
+	tcs := []struct {
+		name                string
+		impersonationConfig ImpersonationConfig
+		expected            map[string][]string
+	}{
+		{
+			name: "all",
+			impersonationConfig: ImpersonationConfig{
+				UserName: "user",
+				Groups:   []string{"one", "two"},
+				Extra: map[string][]string{
+					"first":  {"A", "a"},
+					"second": {"B", "b"},
+				},
+			},
+			expected: map[string][]string{
+				ImpersonateUserHeader:                       {"user"},
+				ImpersonateGroupHeader:                      {"one", "two"},
+				ImpersonateUserExtraHeaderPrefix + "First":  {"A", "a"},
+				ImpersonateUserExtraHeaderPrefix + "Second": {"B", "b"},
+			},
+		},
+	}
+
+	for _, tc := range tcs {
+		rt := &testRoundTripper{}
+		req := &http.Request{
+			Header: make(http.Header),
+		}
+		NewImpersonatingRoundTripper(tc.impersonationConfig, rt).RoundTrip(req)
+
+		for k, v := range rt.Request.Header {
+			expected, ok := tc.expected[k]
+			if !ok {
+				t.Errorf("%v missing %v=%v", tc.name, k, v)
+				continue
+			}
+			if !reflect.DeepEqual(expected, v) {
+				t.Errorf("%v expected %v: %v, got %v", tc.name, k, expected, v)
+			}
+		}
+		for k, v := range tc.expected {
+			expected, ok := rt.Request.Header[k]
+			if !ok {
+				t.Errorf("%v missing %v=%v", tc.name, k, v)
+				continue
+			}
+			if !reflect.DeepEqual(expected, v) {
+				t.Errorf("%v expected %v: %v, got %v", tc.name, k, expected, v)
+			}
+		}
+	}
+}
1 vendor/BUILD vendored
@@ -10834,6 +10834,7 @@ go_library(
     deps = [
         "//vendor:github.com/davecgh/go-spew/spew",
         "//vendor:github.com/ghodss/yaml",
+        "//vendor:k8s.io/client-go/discovery",
         "//vendor:k8s.io/client-go/pkg/third_party/forked/golang/json",
        "//vendor:k8s.io/client-go/pkg/util/json",
     ],